[llvm] b3bec6e - [RISCV] Use vnsrl.wx with x0 instead of vnsrl.vi for truncate.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jan 26 18:38:21 PST 2022


Author: Craig Topper
Date: 2022-01-26T18:38:13-08:00
New Revision: b3bec6e4530b7540cc3ae3084d5adfe76a159d06

URL: https://github.com/llvm/llvm-project/commit/b3bec6e4530b7540cc3ae3084d5adfe76a159d06
DIFF: https://github.com/llvm/llvm-project/commit/b3bec6e4530b7540cc3ae3084d5adfe76a159d06.diff

LOG: [RISCV] Use vnsrl.wx with x0 instead of vnsrl.vi for truncate.

This matches what the spec uses for the vncvt.x.x.w assembly
pseudoinstruction.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D118295
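
For reference, a minimal sketch of the equivalence this change relies on
(register choices are illustrative): the V spec defines vncvt.x.x.w as a
pseudoinstruction for vnsrl.wx with x0 as the shift-amount operand, so the
same zero-shift truncate can be written three ways:

    vnsrl.wi    v8, v16, 0    # before this change: narrowing shift by immediate 0
    vnsrl.wx    v8, v16, x0   # after this change: narrowing shift by x0 (always zero)
    vncvt.x.x.w v8, v16       # spec alias that assembles to the vnsrl.wx form above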

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
    llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
    llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
    llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 5cff16c32fe78..964f0fa54512b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -770,8 +770,8 @@ foreach vtiTowti = AllWidenableIntVectors in {
   def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                                (vti.Mask true_mask),
                                                VLOpFrag)),
-            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
-                wti.RegClass:$rs1, 0, GPR:$vl, vti.Log2SEW)>;
+            (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
+                wti.RegClass:$rs1, X0, GPR:$vl, vti.Log2SEW)>;
 
   def : Pat<(vti.Vector
              (riscv_trunc_vector_vl

diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index 5850c7c1937e3..62d38dc809bda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -21,7 +21,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 ; RV32-NEXT:    lui a0, 1048568
 ; RV32-NEXT:    vand.vx v8, v8, a0
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: fixedlen:
@@ -32,7 +32,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    vand.vx v8, v8, a0
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    ret
   %v41 = insertelement <2 x i32> undef, i32 16, i32 0
   %v42 = shufflevector <2 x i32> %v41, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -51,7 +51,7 @@ define <vscale x 2 x i16> @scalable(<vscale x 2 x i32> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    lui a0, 1048568
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 7fb412094054c..e307e2630f3cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -60,9 +60,9 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -76,9 +76,9 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -145,9 +145,9 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -161,9 +161,9 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -230,9 +230,9 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v10, v10
 ; RV32D-NEXT:    vsrl.vi v10, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v10, 0
+; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -246,9 +246,9 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v10, v10
 ; RV64D-NEXT:    vsrl.vi v10, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v10, 0
+; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -315,9 +315,9 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v12, v12
 ; RV32D-NEXT:    vsrl.vi v12, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v12, 0
+; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v10, 0
+; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -331,9 +331,9 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v12, v12
 ; RV64D-NEXT:    vsrl.vi v12, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v12, 0
+; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v10, 0
+; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -400,9 +400,9 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v16, v16
 ; RV32D-NEXT:    vsrl.vi v16, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v16, 0
+; RV32D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v12, 0
+; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
@@ -416,9 +416,9 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v16, v16
 ; RV64D-NEXT:    vsrl.vi v16, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v16, 0
+; RV64D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v12, 0
+; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
@@ -561,7 +561,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -576,7 +576,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -662,7 +662,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -677,7 +677,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -763,7 +763,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v10, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v10, 0
+; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -778,7 +778,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v10, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v10, 0
+; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -864,7 +864,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vi v12, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v12, 0
+; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -879,7 +879,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vi v12, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v12, 0
+; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -965,7 +965,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vi v16, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v16, 0
+; RV32D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -980,7 +980,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vi v16, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v16, 0
+; RV64D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1144,7 +1144,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1160,7 +1160,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v9, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v9, 0
+; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1253,7 +1253,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v10, 0
+; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1269,7 +1269,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v10, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v9, v10, 0
+; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1362,7 +1362,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v12, 0
+; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1378,7 +1378,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v12, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v12, 0
+; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1471,7 +1471,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v16, 0
+; RV32D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1487,7 +1487,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v16, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v16, 0
+; RV64D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -2048,9 +2048,9 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2062,9 +2062,9 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2128,9 +2128,9 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2142,9 +2142,9 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2208,9 +2208,9 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v10, 0
+; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2222,9 +2222,9 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v10, 0
+; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2288,9 +2288,9 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v12, 0
+; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2302,9 +2302,9 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v12, 0
+; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2368,9 +2368,9 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v16, 0
+; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2382,9 +2382,9 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v16, 0
+; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2522,7 +2522,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2534,7 +2534,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2616,7 +2616,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2628,7 +2628,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2710,7 +2710,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -2722,7 +2722,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -2804,7 +2804,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -2816,7 +2816,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -2898,7 +2898,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -2910,7 +2910,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
@@ -3069,7 +3069,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -3082,7 +3082,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -3171,7 +3171,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -3184,7 +3184,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -3273,7 +3273,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -3286,7 +3286,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -3375,7 +3375,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -3388,7 +3388,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index 3326407581171..73e8585fe3431 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -59,9 +59,9 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -79,9 +79,9 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -146,9 +146,9 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -166,9 +166,9 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -233,9 +233,9 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v10, 0
+; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -253,9 +253,9 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v10, 0
+; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -320,9 +320,9 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v12, 0
+; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -340,9 +340,9 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v12, 0
+; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -407,9 +407,9 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v16, 0
+; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -427,9 +427,9 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v16, 0
+; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -559,7 +559,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -577,7 +577,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -656,7 +656,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -674,7 +674,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -753,7 +753,7 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    li a0, 16
@@ -771,7 +771,7 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    li a0, 16
@@ -850,7 +850,7 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    li a0, 16
@@ -868,7 +868,7 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    li a0, 16
@@ -947,7 +947,7 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    li a0, 16
@@ -965,7 +965,7 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    li a0, 16
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v9, 0
+; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1126,7 +1126,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1206,7 +1206,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v9, v10, 0
+; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1226,7 +1226,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1306,7 +1306,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v12, 0
+; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1326,7 +1326,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1406,7 +1406,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v16, 0
+; RV32D-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1426,7 +1426,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1889,9 +1889,9 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -1906,9 +1906,9 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -1969,9 +1969,9 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -1986,9 +1986,9 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2049,9 +2049,9 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v10, 0
+; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2066,9 +2066,9 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v10, 0
+; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2129,9 +2129,9 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v12, 0
+; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2146,9 +2146,9 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v12, 0
+; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2209,9 +2209,9 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v16, 0
+; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2226,9 +2226,9 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
 ; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v16, 0
+; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2352,7 +2352,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2366,7 +2366,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2440,7 +2440,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2454,7 +2454,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2528,7 +2528,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -2542,7 +2542,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -2616,7 +2616,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -2630,7 +2630,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -2704,7 +2704,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -2718,7 +2718,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
@@ -2855,7 +2855,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v8, v8, 0
+; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2870,7 +2870,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v8, v8, 0
+; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2947,7 +2947,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wi v10, v8, 0
+; RV32D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -2962,7 +2962,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wi v10, v8, 0
+; RV64D-NEXT:    vnsrl.wx v10, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -3039,7 +3039,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wi v12, v8, 0
+; RV32D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -3054,7 +3054,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wi v12, v8, 0
+; RV64D-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -3131,7 +3131,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wi v16, v8, 0
+; RV32D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -3146,7 +3146,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wi v16, v8, 0
+; RV64D-NEXT:    vnsrl.wx v16, v8, zero
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
index 5ecfae181a707..17b5df3237136 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -410,7 +410,7 @@ define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
@@ -474,7 +474,7 @@ define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
@@ -538,7 +538,7 @@ define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
@@ -598,7 +598,7 @@ define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
@@ -658,7 +658,7 @@ define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
@@ -694,7 +694,7 @@ define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, <vscale x 32 x
 ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
@@ -706,9 +706,9 @@ define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
@@ -720,7 +720,7 @@ define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
@@ -758,9 +758,9 @@ define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
@@ -772,7 +772,7 @@ define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, <vscale x 2 x i16
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
@@ -808,9 +808,9 @@ define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
@@ -822,7 +822,7 @@ define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, <vscale x 4 x i16
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
@@ -858,9 +858,9 @@ define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
@@ -872,7 +872,7 @@ define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, <vscale x 8 x i16
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
@@ -908,9 +908,9 @@ define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
@@ -922,7 +922,7 @@ define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
@@ -934,11 +934,11 @@ define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
@@ -950,9 +950,9 @@ define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, <vscale x 1 x i16
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
@@ -964,7 +964,7 @@ define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, <vscale x 1 x i32
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
@@ -976,11 +976,11 @@ define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
@@ -992,9 +992,9 @@ define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, <vscale x 2 x i16
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
@@ -1006,7 +1006,7 @@ define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, <vscale x 2 x i32
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
@@ -1018,11 +1018,11 @@ define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
@@ -1034,9 +1034,9 @@ define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, <vscale x 4 x i16
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
@@ -1048,7 +1048,7 @@ define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, <vscale x 4 x i32
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
@@ -1060,11 +1060,11 @@ define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
@@ -1076,9 +1076,9 @@ define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, <vscale x 8 x i16
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
@@ -1090,7 +1090,7 @@ define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, <vscale x 8 x i32
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index a67d54f8e7275..e90ee2480d80e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -125,7 +125,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX8-RV32-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX8-RV32-NEXT:    li a1, 134
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v8, v9, a1
@@ -143,7 +143,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX8-RV64-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX8-RV64-NEXT:    li a1, 134
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v8, v9, a1
@@ -1045,7 +1045,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX8-RV32-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX8-RV32-NEXT:    li a1, 134
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v8, v10, a1
@@ -1064,7 +1064,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX8-RV64-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX8-RV64-NEXT:    li a1, 134
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v8, v10, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index 3059d1777c362..9acfb3cb7ef81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -115,7 +115,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX8-RV32-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vsub.vx v8, v9, a1
@@ -135,7 +135,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX8-RV64-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vsub.vx v8, v9, a1
@@ -927,7 +927,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX8-RV32-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vsub.vx v8, v10, a1
@@ -948,7 +948,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX8-RV64-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vsub.vx v8, v10, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index df657d83d5e7f..7a0b183984049 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -573,7 +573,7 @@ define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i16_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i16> %x to <2 x i8>
@@ -637,7 +637,7 @@ define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v4i16_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i16> %x to <4 x i8>
@@ -723,7 +723,7 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, <8 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v8i16_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <8 x i16> %x to <8 x i8>
@@ -847,8 +847,8 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i16_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -857,7 +857,7 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i16_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vse8.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i16> %x to <16 x i8>
@@ -1031,9 +1031,9 @@ define void @truncstore_v2i32_v2i8(<2 x i32> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i32> %x to <2 x i8>
@@ -1045,7 +1045,7 @@ define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v2i32_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i32> %x to <2 x i16>
@@ -1083,9 +1083,9 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v4i32_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i32> %x to <4 x i8>
@@ -1097,7 +1097,7 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, <4 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v4i32_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i32> %x to <4 x i16>
@@ -1157,13 +1157,13 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i32_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1172,9 +1172,9 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i32_v8i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v10, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i32> %x to <8 x i8>
@@ -1186,8 +1186,8 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i32_v8i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1196,7 +1196,7 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i32_v8i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vse16.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i32> %x to <8 x i16>
@@ -1270,25 +1270,25 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i32_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1297,9 +1297,9 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i32_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i32> %x to <16 x i8>
@@ -1311,13 +1311,13 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i32_v16i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1328,7 +1328,7 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i32_v16i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vse16.v v12, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i32> %x to <16 x i16>
@@ -1436,11 +1436,11 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i8>
@@ -1452,9 +1452,9 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, <2 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i16>
@@ -1466,7 +1466,7 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, <2 x i32>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i32>
@@ -1478,17 +1478,17 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1497,11 +1497,11 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v10, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i8>
@@ -1513,13 +1513,13 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1528,9 +1528,9 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v10, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i16>
@@ -1542,8 +1542,8 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
@@ -1552,7 +1552,7 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX4-NEXT:    vse32.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i32>
@@ -1564,33 +1564,33 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1599,11 +1599,11 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i8>
@@ -1615,25 +1615,25 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1642,9 +1642,9 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i16>
@@ -1656,13 +1656,13 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1673,7 +1673,7 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i32>
@@ -1685,65 +1685,65 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v13, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 10
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v14, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v14, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v15, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 14
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1752,17 +1752,17 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v16, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v16, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v16, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v12, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v14, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v14, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v14, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v14, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
@@ -1776,47 +1776,47 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v13, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v14, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v14, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v15, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 6
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1827,13 +1827,13 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v16, v12, 0
+; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v12, v16, 0
+; LMULMAX4-NEXT:    vnsrl.wx v12, v16, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v14, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v14, v8, zero
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v8, v14, 0
+; LMULMAX4-NEXT:    vnsrl.wx v8, v14, zero
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
@@ -1847,23 +1847,23 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v13, 0
-; LMULMAX1-NEXT:    vnsrl.wi v11, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
+; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v11, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v15, 0
-; LMULMAX1-NEXT:    vnsrl.wi v12, v14, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
+; LMULMAX1-NEXT:    vnsrl.wx v12, v14, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 48
@@ -1878,8 +1878,8 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wi v16, v12, 0
-; LMULMAX4-NEXT:    vnsrl.wi v12, v8, 0
+; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
+; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v12, v16, 8
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 0f3efd2daaacd..0cc70c9913006 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -348,9 +348,9 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -367,9 +367,9 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -410,9 +410,9 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v8, v12, 0
+; LMULMAX8-NEXT:    vnsrl.wx v8, v12, zero
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX8-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -429,31 +429,31 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v11, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v11, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v11, v11, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)
@@ -472,9 +472,9 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v8, v12, 0
+; LMULMAX8-NEXT:    vnsrl.wx v8, v12, zero
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX8-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -491,31 +491,31 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v11, v12, 0
+; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v11, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v11, v11, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)

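A note for readers scanning these CHECK updates: the two encodings are
interchangeable. vnsrl.wi vd, vs2, 0 shifts each 2*SEW-wide source element
right by the immediate 0 and writes back the low SEW bits, while vnsrl.wx
vd, vs2, zero takes the shift amount from x0, which is hardwired to 0, so
both perform the same low-half truncate. A minimal standalone sketch, with
illustrative register choices not taken from any one test:

    vsetvli  a0, zero, e16, m1, ta, mu   # result elements are e16
    vnsrl.wi v8, v16, 0                  # old form: e32 -> e16, immediate shift of 0
    vnsrl.wx v8, v16, zero               # new form: same truncate, shift amount read from x0
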
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 4482c653ad4d1..59b4ba7fa8058 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -169,9 +169,9 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
@@ -186,9 +186,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX8-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wi v8, v10, 0
+; LMULMAX8-NEXT:    vnsrl.wx v8, v10, zero
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -197,9 +197,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX2-NEXT:    vnsrl.wi v10, v8, 0
+; LMULMAX2-NEXT:    vnsrl.wx v10, v8, zero
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX2-NEXT:    vnsrl.wi v8, v10, 0
+; LMULMAX2-NEXT:    vnsrl.wx v8, v10, zero
 ; LMULMAX2-NEXT:    vse8.v v8, (a1)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -210,13 +210,13 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index c3f7772878d44..65e66cd230583 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1037,7 +1037,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1064,7 +1064,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1115,7 +1115,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1142,7 +1142,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1192,7 +1192,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1219,7 +1219,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1245,7 +1245,7 @@ define <8 x i64> @mgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1907,7 +1907,7 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %id
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1934,7 +1934,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %id
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1985,7 +1985,7 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2012,7 +2012,7 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2062,7 +2062,7 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2089,7 +2089,7 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2115,7 +2115,7 @@ define <8 x double> @mgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12

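The RV32 hunks in this file all follow one idiom: the gathers compute i64
byte offsets, but on RV32 only the low 32 bits of each offset are
significant, so the backend truncates the offsets with a single narrowing
shift and issues a 32-bit indexed load. A sketch of that recurring
sequence, condensed from the checks above (register numbers are
illustrative):

    vsetivli   zero, 8, e64, m4, ta, mu
    vsll.vi    v8, v16, 3               # byte offsets: index * 8
    vsetvli    a1, zero, e32, m2, ta, mu
    vnsrl.wx   v16, v8, zero            # keep the low 32 bits of each offset
    vsetivli   zero, 8, e64, m4, ta, mu
    vluxei32.v v12, (a0), v16, v0.t     # masked indexed load with 32-bit offsets
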
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index f357ae3575122..eb542774134f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -44,14 +44,14 @@ define void @mscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i16_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i16_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i16> %val to <2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i32_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i32_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i32> %val to <2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i8>
@@ -236,14 +236,14 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i32_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i32_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i32> %val to <2 x i16>
@@ -255,18 +255,18 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i16>
@@ -474,14 +474,14 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i32>
@@ -843,7 +843,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -868,7 +868,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -915,7 +915,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -940,7 +940,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -986,7 +986,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1011,7 +1011,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1035,7 +1035,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1637,7 +1637,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1662,7 +1662,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1709,7 +1709,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1734,7 +1734,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1780,7 +1780,7 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1805,7 +1805,7 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1829,7 +1829,7 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret

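vnsrl narrows by exactly one power-of-two step (2*SEW to SEW), so the
truncating scatters above lower an i64-to-i8 store through three
successive narrows, changing the element width with a vsetvli between
each step. Condensed from the RV32 checks above:

    vsetivli zero, 2, e32, mf2, ta, mu
    vnsrl.wx v8, v8, zero               # i64 -> i32
    vsetvli  zero, zero, e16, mf4, ta, mu
    vnsrl.wx v8, v8, zero               # i32 -> i16
    vsetvli  zero, zero, e8, mf8, ta, mu
    vnsrl.wx v8, v8, zero               # i16 -> i8
    vsoxei32.v v8, (zero), v9, v0.t     # masked scatter of the i8 values
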
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index bfc52bbce7242..e88fb6e71dcfd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
 ; RV32-NEXT:    vsext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -991,7 +991,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
 ; RV32-NEXT:    vzext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1040,7 +1040,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vsext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1066,7 +1066,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vzext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1114,7 +1114,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vsext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1140,7 +1140,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vzext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1165,7 +1165,7 @@ define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1702,7 +1702,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %i
 ; RV32-NEXT:    vsext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1728,7 +1728,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %i
 ; RV32-NEXT:    vzext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1777,7 +1777,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16>
 ; RV32-NEXT:    vsext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1803,7 +1803,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16>
 ; RV32-NEXT:    vzext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1851,7 +1851,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32>
 ; RV32-NEXT:    vsext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1877,7 +1877,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32>
 ; RV32-NEXT:    vzext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1902,7 +1902,7 @@ define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v8, 0
+; RV32-NEXT:    vnsrl.wx v12, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -2058,7 +2058,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v16, 0
+; RV32-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v12, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2069,7 +2069,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v10
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2132,7 +2132,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v12, v16, 0
+; RV32-NEXT:    vnsrl.wx v12, v16, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v12, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2143,7 +2143,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v10
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2270,7 +2270,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v16, 0
+; RV32-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2281,7 +2281,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2344,7 +2344,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v16, 0
+; RV32-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2355,7 +2355,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2481,7 +2481,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v8, 0
+; RV32-NEXT:    vnsrl.wx v4, v8, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v4, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2492,7 +2492,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
 ; RV32-NEXT:    vluxei32.v v8, (a0), v24, v0.t
@@ -2555,7 +2555,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v8, 0
+; RV32-NEXT:    vnsrl.wx v4, v8, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v4, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2566,7 +2566,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
 ; RV32-NEXT:    vluxei32.v v8, (a0), v24, v0.t
@@ -2624,7 +2624,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vnsrl.wx v28, v16, zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    li a2, 16
@@ -2635,7 +2635,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v28, v8, 0
+; RV32-NEXT:    vnsrl.wx v28, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v24
 ; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index 5fe4ad0cfe635..5255910217f21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,7 +26,7 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -34,7 +34,7 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -47,9 +47,9 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -57,9 +57,9 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -72,11 +72,11 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -84,11 +84,11 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -194,7 +194,7 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -202,7 +202,7 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -215,9 +215,9 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -225,9 +225,9 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -442,7 +442,7 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -450,7 +450,7 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -779,7 +779,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -805,7 +805,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -854,7 +854,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -880,7 +880,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -928,7 +928,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -954,7 +954,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -979,7 +979,7 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1500,7 +1500,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1526,7 +1526,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1575,7 +1575,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1601,7 +1601,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1649,7 +1649,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1675,7 +1675,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1700,7 +1700,7 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v12, 0
+; RV32-NEXT:    vnsrl.wx v16, v12, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1932,7 +1932,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsext.vf2 v16, v24
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a4, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, a2, -16
 ; RV32-NEXT:    csrr a4, vlenb
@@ -1950,7 +1950,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 4
@@ -2078,7 +2078,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vzext.vf2 v16, v24
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli a4, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, a2, -16
 ; RV32-NEXT:    csrr a4, vlenb
@@ -2096,7 +2096,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 4

diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
index 7c423a20a0629..302174e5aadf5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -6,9 +6,9 @@ define <vscale x 4 x i5> @trunc_nxv4i32_to_nxv4i5(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %v = trunc <vscale x 4 x i32> %a to <vscale x 4 x i5>
   ret <vscale x 4 x i5> %v
@@ -18,9 +18,9 @@ define <vscale x 1 x i5> @trunc_nxv1i32_to_nxv1i5(<vscale x 1 x i32> %a) {
 ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %v = trunc <vscale x 1 x i32> %a to <vscale x 1 x i5>
   ret <vscale x 1 x i5> %v

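The i5 cases above are the interesting ones: i5 is not a legal element
type, so it is promoted to an i8 container and the lowering is presumably
the same halving chain as an ordinary i32-to-i8 truncate, with bits above
bit 4 left as don't-care until a consumer needs them masked. The expected
sequence, matching the checks above:

    vsetvli  a0, zero, e16, m1, ta, mu
    vnsrl.wx v10, v8, zero              # e32 -> e16
    vsetvli  zero, zero, e8, mf2, ta, mu
    vnsrl.wx v8, v10, zero              # e16 -> e8; bits [7:5] are don't-care
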
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 31731af18b6b3..50773c42966b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1043,7 +1043,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsca
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1070,7 +1070,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsca
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1148,7 +1148,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1198,7 +1198,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1225,7 +1225,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1251,7 +1251,7 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1979,7 +1979,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base,
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2006,7 +2006,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base,
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2057,7 +2057,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2084,7 +2084,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2134,7 +2134,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2161,7 +2161,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2187,7 +2187,7 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8f64(double* %base, <vscale x 8
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index f82dcca4ce65b..90cc7bf501bbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@ define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v11, 0
+; RV32-NEXT:    vnsrl.wx v8, v11, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v12, 0
+; RV64-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -236,14 +236,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -255,18 +255,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v11, 0
+; RV32-NEXT:    vnsrl.wx v8, v11, zero
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v12, 0
+; RV64-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -474,14 +474,14 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
@@ -843,7 +843,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -868,7 +868,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -915,7 +915,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -940,7 +940,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -986,7 +986,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1011,7 +1011,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1035,7 +1035,7 @@ define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsca
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1637,7 +1637,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1662,7 +1662,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1709,7 +1709,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1734,7 +1734,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1780,7 +1780,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1805,7 +1805,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1829,7 +1829,7 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
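
A narrowing shift only halves the element width (2*SEW to SEW), so the i64-to-i8 truncating stores above lower to a chain of three narrows, with a vsetvli between each step to halve SEW and LMUL while keeping the same VL. The RV32 check lines above reduce to this pattern (a sketch of one function body, not a complete test):

    vsetvli a0, zero, e32, m1, ta, mu
    vnsrl.wx v11, v8, zero               # i64 -> i32
    vsetvli zero, zero, e16, mf2, ta, mu
    vnsrl.wx v8, v11, zero               # i32 -> i16
    vsetvli zero, zero, e8, mf4, ta, mu
    vnsrl.wx v8, v8, zero                # i16 -> i8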

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
index 6884ebc92565c..262ac6d7cd241 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -636,7 +636,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -648,7 +648,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -748,7 +748,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -760,7 +760,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -860,7 +860,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -872,7 +872,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -972,7 +972,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -984,7 +984,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1084,7 +1084,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 16 x float> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
@@ -1096,7 +1096,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 16 x float> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
@@ -1174,9 +1174,9 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -1188,9 +1188,9 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -1202,7 +1202,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
@@ -1214,7 +1214,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wx v8, v9, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
@@ -1292,9 +1292,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -1306,9 +1306,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -1320,7 +1320,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
@@ -1410,9 +1410,9 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -1424,9 +1424,9 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
@@ -1450,7 +1450,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
@@ -1528,9 +1528,9 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v16, 0
+; CHECK-NEXT:    vnsrl.wx v10, v16, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1542,9 +1542,9 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v16, 0
+; CHECK-NEXT:    vnsrl.wx v10, v16, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1556,7 +1556,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec
@@ -1568,7 +1568,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec
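
For float-to-integer conversions narrower than half the source width, a single vfncvt.rtz.x.f.w (or the .xu. form for fptoui) performs the one-step float narrow, and the remaining halvings are the same vnsrl.wx truncate chain as in the integer tests. The f32-to-i8 cases above reduce to this (a sketch, registers as in the CHECK lines):

    vsetvli a0, zero, e16, mf4, ta, mu
    vfncvt.rtz.x.f.w v9, v8              # f32 -> i16, round toward zero
    vsetvli zero, zero, e8, mf8, ta, mu
    vnsrl.wx v8, v9, zero                # i16 -> i8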

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index 4c3636e315825..1aa4b914481cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1082,7 +1082,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1131,7 +1131,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1157,7 +1157,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1205,7 +1205,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1231,7 +1231,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1256,7 +1256,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i6
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1851,7 +1851,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i8_nxv6f64(double* %base
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1877,7 +1877,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(double* %base
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1926,7 +1926,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(double* %bas
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1952,7 +1952,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i16_nxv6f64(double* %bas
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2000,7 +2000,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i32_nxv6f64(double* %bas
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2026,7 +2026,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i32_nxv6f64(double* %bas
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2051,7 +2051,7 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6f64(double* %base, <vscale x
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2117,7 +2117,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2143,7 +2143,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2192,7 +2192,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %bas
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2218,7 +2218,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %bas
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2266,7 +2266,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %bas
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2292,7 +2292,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %bas
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2317,7 +2317,7 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8f64(double* %base, <vscale x
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2471,7 +2471,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v16, 0
+; RV32-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
 ; RV32-NEXT:    bltu a1, a2, .LBB104_4
@@ -2481,7 +2481,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2543,7 +2543,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v16, 0
+; RV32-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
 ; RV32-NEXT:    bltu a1, a2, .LBB105_4
@@ -2553,7 +2553,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v4, v24, 0
+; RV32-NEXT:    vnsrl.wx v4, v24, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
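
The RV32 gather changes above (and the scatter changes below) all follow one shape: indices are widened to e64, scaled by the element size with vsll.vi, and the resulting 64-bit byte offsets are truncated back to 32 bits so the ...ei32 indexed addressing forms can be used. A condensed sketch of the sign-extended i32-index, 8-byte-element case (assuming a0 = base pointer, a1 = AVL, v0 = mask; registers illustrative):

    vsetvli a2, zero, e64, m8, ta, mu
    vsext.vf2 v16, v8                    # sign-extend i32 indices to i64
    vsll.vi v8, v16, 3                   # scale by the 8-byte element size
    vsetvli zero, zero, e32, m4, ta, mu
    vnsrl.wx v16, v8, zero               # truncate byte offsets to 32 bits
    vsetvli zero, a1, e64, m8, ta, mu
    vluxei32.v v8, (a0), v16, v0.t       # indexed load with 32-bit offsets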

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 01be81fd1912d..ca444ee17b0c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -44,7 +44,7 @@ define void @vpscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -52,7 +52,7 @@ define void @vpscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -65,9 +65,9 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -75,9 +75,9 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -90,11 +90,11 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v11, 0
+; RV32-NEXT:    vnsrl.wx v8, v11, zero
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -102,11 +102,11 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v12, 0
+; RV64-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -230,7 +230,7 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -238,7 +238,7 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -251,9 +251,9 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v11, 0
+; RV32-NEXT:    vnsrl.wx v8, v11, zero
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -261,9 +261,9 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v12, 0
+; RV64-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -460,7 +460,7 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v11, v8, 0
+; RV32-NEXT:    vnsrl.wx v11, v8, zero
 ; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; RV32-NEXT:    vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -468,7 +468,7 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v12, v8, 0
+; RV64-NEXT:    vnsrl.wx v12, v8, zero
 ; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; RV64-NEXT:    vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -815,7 +815,7 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -841,7 +841,7 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -890,7 +890,7 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -916,7 +916,7 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -964,7 +964,7 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -990,7 +990,7 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1015,7 +1015,7 @@ define void @vpscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsc
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1590,7 +1590,7 @@ define void @vpscatter_baseidx_sext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, d
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1616,7 +1616,7 @@ define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, d
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1665,7 +1665,7 @@ define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1691,7 +1691,7 @@ define void @vpscatter_baseidx_zext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1739,7 +1739,7 @@ define void @vpscatter_baseidx_sext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1765,7 +1765,7 @@ define void @vpscatter_baseidx_zext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1790,7 +1790,7 @@ define void @vpscatter_baseidx_nxv6f64(<vscale x 6 x double> %val, double* %base
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1855,7 +1855,7 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1881,7 +1881,7 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1930,7 +1930,7 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1956,7 +1956,7 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2004,7 +2004,7 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2030,7 +2030,7 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2055,7 +2055,7 @@ define void @vpscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v16, 0
+; RV32-NEXT:    vnsrl.wx v24, v16, zero
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2232,7 +2232,7 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsext.vf4 v16, v26
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2248,7 +2248,7 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a4, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
@@ -2348,7 +2348,7 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vzext.vf4 v16, v26
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v24, v8, 0
+; RV32-NEXT:    vnsrl.wx v24, v8, zero
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
 ; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2364,7 +2364,7 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v16, v8, 0
+; RV32-NEXT:    vnsrl.wx v16, v8, zero
 ; RV32-NEXT:    vsetvli zero, a4, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3

diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
index 9a4b78a009e34..c756f7b2223f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
@@ -6,7 +6,7 @@ define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -16,7 +16,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -26,7 +26,7 @@ define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -36,7 +36,7 @@ define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
@@ -47,7 +47,7 @@ define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
@@ -58,9 +58,9 @@ define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -70,7 +70,7 @@ define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %tvec
@@ -80,9 +80,9 @@ define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -92,7 +92,7 @@ define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %tvec
@@ -102,9 +102,9 @@ define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -114,7 +114,7 @@ define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
@@ -125,9 +125,9 @@ define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %tvec
@@ -137,7 +137,7 @@ define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
@@ -148,9 +148,9 @@ define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tvec
@@ -160,7 +160,7 @@ define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
@@ -171,11 +171,11 @@ define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -185,9 +185,9 @@ define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %tvec
@@ -197,7 +197,7 @@ define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %tvec
@@ -207,11 +207,11 @@ define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -221,9 +221,9 @@ define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %tvec
@@ -233,7 +233,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vnsrl.wx v10, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
@@ -244,11 +244,11 @@ define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vnsrl.wx v8, v8, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -258,9 +258,9 @@ define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wx v8, v12, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %tvec
@@ -270,7 +270,7 @@ define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vnsrl.wx v12, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
@@ -281,11 +281,11 @@ define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v10, v16, 0
+; CHECK-NEXT:    vnsrl.wx v10, v16, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v8, v10, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %tvec
@@ -295,9 +295,9 @@ define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vnsrl.wx v8, v16, zero
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tvec
@@ -307,7 +307,7 @@ define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vnsrl.wx v16, v8, zero
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
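
For readers skimming the test churn above, a minimal sketch of why each of
these updates is behavior-preserving (the surrounding vsetvli state is
assumed; this snippet is illustrative and not part of the commit):

    # Both instructions narrow each 2*SEW-bit source element to SEW bits
    # by shifting right and keeping the low half; with a shift amount of
    # zero, either one is a plain truncation.
    vnsrl.wi v8, v8, 0        # old output: zero encoded as a 5-bit immediate
    vnsrl.wx v8, v8, zero     # new output: zero read from register x0
    # The RVV spec defines the pseudoinstruction
    #   vncvt.x.x.w vd, vs, vm  =>  vnsrl.wx vd, vs, x0, vm
    # so the WX form matches the spelling the spec uses for vncvt.x.x.w.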