[llvm] 9b0f227 - [TableGen][RISCV] Add InstAliases with zero_reg to cover unmasked vnot.v, vncvt.x.x.w, vneg.v, etc.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 22 10:15:03 PDT 2022


Author: Craig Topper
Date: 2022-03-22T10:14:43-07:00
New Revision: 9b0f227d7b2cee4e4492d0501e032f9b33579853

URL: https://github.com/llvm/llvm-project/commit/9b0f227d7b2cee4e4492d0501e032f9b33579853
DIFF: https://github.com/llvm/llvm-project/commit/9b0f227d7b2cee4e4492d0501e032f9b33579853.diff

LOG: [TableGen][RISCV] Add InstAliases with zero_reg to cover unmasked vnot.v, vncvt.x.x.w, vneg.v, etc.

The mask operand being NoRegister prevented the existing aliases from matching,
since NoRegister isn't in the VMV0 register class.

To work around this, I've added new aliases that look for zero_reg.
I had to modify TableGen to generate matching code for zero_reg.
As a consequence, I had to change the EmitPriority for an ARM alias
that uses zero_reg and would otherwise have started printing.
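
As an illustration, here is the shape of the change using the vnot.v alias
from the diff below (the same pattern applies to the other aliases). The
existing masked alias only matches when the mask operand is a register in
the VMV0 class, so an unmasked instruction whose mask operand is NoRegister
never matched it; the new alias matches zero_reg explicitly:

  // Masked form: matches only when $vm is a register in the VMV0 class.
  def : InstAlias<"vnot.v $vd, $vs$vm",
                  (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
  // New unmasked form: matches when the mask operand is NoRegister.
  def : InstAlias<"vnot.v $vd, $vs",
                  (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

With this pair, an unmasked VXOR_VI with immediate -1 now prints as
"vnot.v v8, v8" instead of "vxor.vi v8, v8, -1", as the test updates
below show.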

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D121496

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/test/CodeGen/RISCV/rvv/aliases.mir
    llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
    llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
    llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
    llvm/test/MC/RISCV/rvv/add.s
    llvm/test/MC/RISCV/rvv/xor.s
    llvm/utils/TableGen/AsmWriterEmitter.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index 359df97043cad..e170087e5baec 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -6388,7 +6388,7 @@ def : ARMInstAlias<"neg${s}${p} $Rd, $Rm",
                    (RSBri GPR:$Rd, GPR:$Rm, 0, pred:$p, cc_out:$s)>;
 
 // Pre-v6, 'mov r0, r0' was used as a NOP encoding.
-def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
+def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg), 0>,
          Requires<[IsARM, NoV6]>;
 
 // MUL/UMLAL/SMLAL/UMULL/SMULL are available on all arches, but

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 1a6870fc61ff1..893d1f65413e9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -893,6 +893,7 @@ defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
 defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;
 
 def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Widening Integer Add/Subtract
 // Refer to 11.2 Widening Vector Arithmetic Instructions
@@ -920,8 +921,12 @@ defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
 
 def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                 (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvt.x.x.v $vd, $vs",
+                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                 (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
+                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Integer Extension
 defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
@@ -950,6 +955,8 @@ defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;
 
 def : InstAlias<"vnot.v $vd, $vs$vm",
                 (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
+def : InstAlias<"vnot.v $vd, $vs",
+                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;
 
 // Vector Single-Width Bit Shift Instructions
 defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
@@ -968,6 +975,8 @@ defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
 
 def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                 (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+def : InstAlias<"vncvt.x.x.w $vd, $vs",
+                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
 
 // Vector Integer Comparison Instructions
 let RVVConstraint = NoConstraint in {
@@ -1186,8 +1195,12 @@ defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;
 
 def : InstAlias<"vfneg.v $vd, $vs$vm",
                 (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfneg.v $vd, $vs",
+                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 def : InstAlias<"vfabs.v $vd, $vs$vm",
                 (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
+def : InstAlias<"vfabs.v $vd, $vs",
+                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
 
 // Vector Floating-Point Compare Instructions
 let RVVConstraint = NoConstraint in {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/aliases.mir b/llvm/test/CodeGen/RISCV/rvv/aliases.mir
index 47b625e111d84..f39b0c73d0943 100644
--- a/llvm/test/CodeGen/RISCV/rvv/aliases.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/aliases.mir
@@ -1,7 +1,6 @@
 # RUN: llc -mtriple riscv32 -mattr=+v -start-after riscv-expand-pseudo -o - %s | FileCheck %s
 # RUN: llc -mtriple riscv64 -mattr=+v -start-after riscv-expand-pseudo -o - %s | FileCheck %s
 
-# FIXME: These should all use the 'vnot' alias despite some not having mask operands
 --- |
   define void @vnot_mask_1() {
     ret void
@@ -45,7 +44,7 @@ body:             |
     liveins: $v25
 
     ; CHECK-LABEL: vnot_no_mask_1:
-    ; CHECK: vxor.vi v25, v25, -1
+    ; CHECK: vnot.v v25, v25
 
     $v25 = VXOR_VI killed $v25, -1, $noreg, implicit $vtype, implicit $vl
 ...
@@ -56,7 +55,7 @@ body:             |
     liveins: $v25
 
     ; CHECK-LABEL: vnot_no_mask_2:
-    ; CHECK: vxor.vi v1, v25, -1
+    ; CHECK: vnot.v v1, v25
 
     $v1 = VXOR_VI killed $v25, -1, $noreg, implicit $vtype, implicit $vl
 ...

diff  --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index a44e59e2accdc..6b65de40da031 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -21,7 +21,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 ; RV32-NEXT:    lui a0, 1048568
 ; RV32-NEXT:    vand.vx v8, v8, a0
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: fixedlen:
@@ -32,7 +32,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    vand.vx v8, v8, a0
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    ret
   %v41 = insertelement <2 x i32> poison, i32 16, i32 0
   %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer

diff  --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index b08d67c5f5ebb..ae0b685239bbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -14,7 +14,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -38,7 +38,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -61,7 +61,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v9
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -76,7 +76,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v9
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -97,7 +97,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -121,7 +121,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -144,7 +144,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v9
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -159,7 +159,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v9
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -180,7 +180,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -204,7 +204,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -227,7 +227,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v9
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -242,7 +242,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v9
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -263,7 +263,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -287,7 +287,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -310,7 +310,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v10
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
@@ -325,7 +325,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v10
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
@@ -346,7 +346,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
@@ -370,7 +370,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
@@ -393,7 +393,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v10, v12
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
@@ -408,7 +408,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vncvt.x.x.w v10, v12
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
@@ -429,7 +429,7 @@ define <vscale x 32 x i8> @ctlz_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 4
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
 ; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
@@ -458,7 +458,7 @@ define <vscale x 64 x i8> @ctlz_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 4
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
 ; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
@@ -489,7 +489,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -522,7 +522,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -584,7 +584,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -617,7 +617,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -679,7 +679,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -712,7 +712,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -774,7 +774,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -807,7 +807,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -869,7 +869,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v12
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -902,7 +902,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v12
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -964,7 +964,7 @@ define <vscale x 32 x i16> @ctlz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 8
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 5
 ; RV32-NEXT:    addi a0, a0, 1365
@@ -997,7 +997,7 @@ define <vscale x 32 x i16> @ctlz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 8
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 5
 ; RV64-NEXT:    addiw a0, a0, 1365
@@ -1037,7 +1037,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -1073,7 +1073,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -1104,7 +1104,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v9
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1120,7 +1120,7 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v9, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v9
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1146,7 +1146,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -1182,7 +1182,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -1213,7 +1213,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v10
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1229,7 +1229,7 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v10, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vncvt.x.x.w v9, v10
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1255,7 +1255,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -1291,7 +1291,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -1322,7 +1322,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v10, v12
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1338,7 +1338,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v12, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vncvt.x.x.w v10, v12
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1364,7 +1364,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v12
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -1400,7 +1400,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v12
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -1431,7 +1431,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vncvt.x.x.w v12, v16
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1447,7 +1447,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v16, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v16, zero
+; RV64D-NEXT:    vncvt.x.x.w v12, v16
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -1473,7 +1473,7 @@ define <vscale x 16 x i32> @ctlz_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 349525
 ; RV32-NEXT:    addi a0, a0, 1365
@@ -1509,7 +1509,7 @@ define <vscale x 16 x i32> @ctlz_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 349525
 ; RV64-NEXT:    addiw a0, a0, 1365
@@ -1573,7 +1573,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsrl.vi v11, v8, 1
@@ -1612,7 +1612,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v9
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI18_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI18_1)
@@ -1677,7 +1677,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsrl.vi v14, v8, 1
@@ -1716,7 +1716,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v10
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI19_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI19_1)
@@ -1781,7 +1781,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsrl.vi v20, v8, 1
@@ -1820,7 +1820,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v12
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI20_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI20_1)
@@ -1885,7 +1885,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vsrl.vi v0, v8, 1
@@ -1924,7 +1924,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI21_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI21_1)
@@ -1962,7 +1962,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -1986,7 +1986,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -2009,7 +2009,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2022,7 +2022,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2040,7 +2040,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -2064,7 +2064,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -2087,7 +2087,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2100,7 +2100,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2118,7 +2118,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -2142,7 +2142,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -2165,7 +2165,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v10
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2178,7 +2178,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v10, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v10
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2196,7 +2196,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
@@ -2220,7 +2220,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
@@ -2243,7 +2243,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v12
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2256,7 +2256,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v12, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v12
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2274,7 +2274,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 4
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
@@ -2298,7 +2298,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 4
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
@@ -2321,7 +2321,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v16, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v16
 ; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2334,7 +2334,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v16, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v16
 ; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2352,7 +2352,7 @@ define <vscale x 32 x i8> @ctlz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 4
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
 ; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
@@ -2380,7 +2380,7 @@ define <vscale x 64 x i8> @ctlz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 4
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
 ; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
@@ -2410,7 +2410,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2443,7 +2443,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2498,7 +2498,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2531,7 +2531,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2586,7 +2586,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2619,7 +2619,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2674,7 +2674,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2707,7 +2707,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2762,7 +2762,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 8
 ; RV32I-NEXT:    vor.vv v8, v8, v12
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2795,7 +2795,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 8
 ; RV64I-NEXT:    vor.vv v8, v8, v12
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2850,7 +2850,7 @@ define <vscale x 32 x i16> @ctlz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 8
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 5
 ; RV32-NEXT:    addi a0, a0, 1365
@@ -2883,7 +2883,7 @@ define <vscale x 32 x i16> @ctlz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 8
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 5
 ; RV64-NEXT:    addiw a0, a0, 1365
@@ -2922,7 +2922,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -2958,7 +2958,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -2989,7 +2989,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -3002,7 +3002,7 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -3024,7 +3024,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v9
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -3060,7 +3060,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v9
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -3091,7 +3091,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v10, v8
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -3104,7 +3104,7 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v10, v8
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -3126,7 +3126,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v10
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -3162,7 +3162,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v10
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -3193,7 +3193,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v12, v8
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -3206,7 +3206,7 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v12, v8
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -3228,7 +3228,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 16
 ; RV32I-NEXT:    vor.vv v8, v8, v12
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1365
@@ -3264,7 +3264,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 16
 ; RV64I-NEXT:    vor.vv v8, v8, v12
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
 ; RV64I-NEXT:    addiw a0, a0, 1365
@@ -3295,7 +3295,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v16, v8
 ; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -3308,7 +3308,7 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v16, v8
 ; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
@@ -3330,7 +3330,7 @@ define <vscale x 16 x i32> @ctlz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 349525
 ; RV32-NEXT:    addi a0, a0, 1365
@@ -3366,7 +3366,7 @@ define <vscale x 16 x i32> @ctlz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 349525
 ; RV64-NEXT:    addiw a0, a0, 1365
@@ -3429,7 +3429,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vsrl.vi v11, v8, 1
@@ -3468,7 +3468,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v9
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI40_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI40_1)
@@ -3532,7 +3532,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vsrl.vi v14, v8, 1
@@ -3571,7 +3571,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v10
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI41_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI41_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI41_1)
@@ -3635,7 +3635,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vsrl.vi v20, v8, 1
@@ -3674,7 +3674,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v12
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI42_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI42_1)
@@ -3738,7 +3738,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vsrl.vi v0, v8, 1
@@ -3777,7 +3777,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    lui a0, %hi(.LCPI43_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI43_0)(a0)
 ; RV64-NEXT:    lui a1, %hi(.LCPI43_1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index d33ad60e09618..0d3984c9448f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -60,7 +60,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -79,7 +79,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -95,7 +95,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -116,7 +116,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -145,7 +145,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -164,7 +164,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -180,7 +180,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -201,7 +201,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -230,7 +230,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v10
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -249,7 +249,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v10, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v10
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -265,7 +265,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -286,7 +286,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -315,7 +315,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v12
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -334,7 +334,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v12, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v12
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -350,7 +350,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -371,7 +371,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -400,7 +400,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v16, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v16
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -419,7 +419,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v16, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v16
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -435,7 +435,7 @@ define <vscale x 32 x i8> @cttz_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v12, v8, a0
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
 ; CHECK-NEXT:    li a0, 85
@@ -461,7 +461,7 @@ define <vscale x 64 x i8> @cttz_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v16, v8, a0
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
 ; CHECK-NEXT:    li a0, 85
@@ -487,7 +487,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -515,7 +515,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -578,7 +578,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -606,7 +606,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -669,7 +669,7 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -697,7 +697,7 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -760,7 +760,7 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -788,7 +788,7 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -851,7 +851,7 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -879,7 +879,7 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -942,7 +942,7 @@ define <vscale x 32 x i16> @cttz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 5
@@ -970,7 +970,7 @@ define <vscale x 32 x i16> @cttz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 5
@@ -1003,7 +1003,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -1032,7 +1032,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -1066,7 +1066,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v9
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1086,7 +1086,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1103,7 +1103,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -1132,7 +1132,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -1166,7 +1166,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v9, v10
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1186,7 +1186,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v10, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1203,7 +1203,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -1232,7 +1232,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -1266,7 +1266,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v10, v12
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1286,7 +1286,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v12, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1303,7 +1303,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -1332,7 +1332,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -1366,7 +1366,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vncvt.x.x.w v12, v16
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -1386,7 +1386,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v16, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    li a0, 32
@@ -1403,7 +1403,7 @@ define <vscale x 16 x i32> @cttz_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 349525
@@ -1432,7 +1432,7 @@ define <vscale x 16 x i32> @cttz_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 349525
@@ -1484,7 +1484,7 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsub.vx v9, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v9
@@ -1515,7 +1515,7 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsub.vx v9, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v9
 ; RV64-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI18_0)(a0)
@@ -1568,7 +1568,7 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsub.vx v10, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v10
@@ -1599,7 +1599,7 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsub.vx v10, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v10
 ; RV64-NEXT:    lui a0, %hi(.LCPI19_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI19_0)(a0)
@@ -1652,7 +1652,7 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsub.vx v12, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v12
@@ -1683,7 +1683,7 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsub.vx v12, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v12
 ; RV64-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI20_0)(a0)
@@ -1736,7 +1736,7 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v16
@@ -1767,7 +1767,7 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI21_0)(a0)
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -1823,7 +1823,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -1850,7 +1850,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -1866,7 +1866,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -1880,7 +1880,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -1901,7 +1901,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -1928,7 +1928,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -1944,7 +1944,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -1958,7 +1958,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -1979,7 +1979,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -2006,7 +2006,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v10, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v10
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2022,7 +2022,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v10, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v10
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2036,7 +2036,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -2057,7 +2057,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -2084,7 +2084,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v12, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v12
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2100,7 +2100,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v12, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v12
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2114,7 +2114,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    li a0, 85
@@ -2135,7 +2135,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    li a0, 85
@@ -2162,7 +2162,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v16, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v16
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2178,7 +2178,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v16, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v16
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2192,7 +2192,7 @@ define <vscale x 32 x i8> @cttz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v12, v8, a0
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
 ; CHECK-NEXT:    li a0, 85
@@ -2217,7 +2217,7 @@ define <vscale x 64 x i8> @cttz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v16, v8, a0
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
 ; CHECK-NEXT:    li a0, 85
@@ -2242,7 +2242,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -2270,7 +2270,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -2324,7 +2324,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -2352,7 +2352,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -2406,7 +2406,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -2434,7 +2434,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -2488,7 +2488,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -2516,7 +2516,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -2570,7 +2570,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 5
@@ -2598,7 +2598,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 5
@@ -2652,7 +2652,7 @@ define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 5
@@ -2680,7 +2680,7 @@ define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 5
@@ -2712,7 +2712,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -2741,7 +2741,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -2775,7 +2775,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v8, v8
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2790,7 +2790,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v8, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2804,7 +2804,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -2833,7 +2833,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -2867,7 +2867,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v10, v8
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
@@ -2882,7 +2882,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v10, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
@@ -2896,7 +2896,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -2925,7 +2925,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -2959,7 +2959,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v12, v8
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
@@ -2974,7 +2974,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v12, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
@@ -2988,7 +2988,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
-; RV32I-NEXT:    vxor.vi v8, v8, -1
+; RV32I-NEXT:    vnot.v v8, v8
 ; RV32I-NEXT:    vand.vv v8, v8, v12
 ; RV32I-NEXT:    vsrl.vi v12, v8, 1
 ; RV32I-NEXT:    lui a0, 349525
@@ -3017,7 +3017,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
-; RV64I-NEXT:    vxor.vi v8, v8, -1
+; RV64I-NEXT:    vnot.v v8, v8
 ; RV64I-NEXT:    vand.vv v8, v8, v12
 ; RV64I-NEXT:    vsrl.vi v12, v8, 1
 ; RV64I-NEXT:    lui a0, 349525
@@ -3051,7 +3051,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vncvt.x.x.w v16, v8
 ; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
@@ -3066,7 +3066,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vncvt.x.x.w v16, v8
 ; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
@@ -3080,7 +3080,7 @@ define <vscale x 16 x i32> @cttz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 1
 ; RV32-NEXT:    lui a0, 349525
@@ -3109,7 +3109,7 @@ define <vscale x 16 x i32> @cttz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 1
 ; RV64-NEXT:    lui a0, 349525
@@ -3160,7 +3160,7 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsub.vx v9, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v9
@@ -3191,7 +3191,7 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsub.vx v9, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v9
 ; RV64-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI40_0)(a0)
@@ -3243,7 +3243,7 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsub.vx v10, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v10
@@ -3274,7 +3274,7 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsub.vx v10, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v10
 ; RV64-NEXT:    lui a0, %hi(.LCPI41_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI41_0)(a0)
@@ -3326,7 +3326,7 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsub.vx v12, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v12
@@ -3357,7 +3357,7 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsub.vx v12, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v12
 ; RV64-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI42_0)(a0)
@@ -3409,7 +3409,7 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
-; RV32-NEXT:    vxor.vi v8, v8, -1
+; RV32-NEXT:    vnot.v v8, v8
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v24, (a0), zero
 ; RV32-NEXT:    vand.vv v8, v8, v16
@@ -3440,7 +3440,7 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
-; RV64-NEXT:    vxor.vi v8, v8, -1
+; RV64-NEXT:    vnot.v v8, v8
 ; RV64-NEXT:    vand.vv v8, v8, v16
 ; RV64-NEXT:    lui a0, %hi(.LCPI43_0)
 ; RV64-NEXT:    ld a0, %lo(.LCPI43_0)(a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
index 3562cdeb816e1..1c32599891cb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -410,7 +410,7 @@ define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
@@ -474,7 +474,7 @@ define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
@@ -538,7 +538,7 @@ define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
@@ -598,7 +598,7 @@ define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
@@ -658,7 +658,7 @@ define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
@@ -694,7 +694,7 @@ define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, <vscale x 32 x
 ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
@@ -706,9 +706,9 @@ define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
@@ -720,7 +720,7 @@ define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16
 ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
@@ -758,9 +758,9 @@ define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
@@ -772,7 +772,7 @@ define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, <vscale x 2 x i16
 ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
@@ -808,9 +808,9 @@ define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
@@ -822,7 +822,7 @@ define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, <vscale x 4 x i16
 ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
@@ -858,9 +858,9 @@ define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
@@ -872,7 +872,7 @@ define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, <vscale x 8 x i16
 ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
@@ -908,9 +908,9 @@ define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
@@ -922,7 +922,7 @@ define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, <vscale x 16 x
 ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
@@ -934,11 +934,11 @@ define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, <vscale x 1 x i8>*
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
@@ -950,9 +950,9 @@ define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, <vscale x 1 x i16
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
@@ -964,7 +964,7 @@ define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, <vscale x 1 x i32
 ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
@@ -976,11 +976,11 @@ define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, <vscale x 2 x i8>*
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
@@ -992,9 +992,9 @@ define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, <vscale x 2 x i16
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
@@ -1006,7 +1006,7 @@ define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, <vscale x 2 x i32
 ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
@@ -1018,11 +1018,11 @@ define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, <vscale x 4 x i8>*
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
@@ -1034,9 +1034,9 @@ define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, <vscale x 4 x i16
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
@@ -1048,7 +1048,7 @@ define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, <vscale x 4 x i32
 ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vs2r.v v12, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
@@ -1060,11 +1060,11 @@ define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, <vscale x 8 x i8>*
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vs1r.v v10, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
@@ -1076,9 +1076,9 @@ define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, <vscale x 8 x i16
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
@@ -1090,7 +1090,7 @@ define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, <vscale x 8 x i32
 ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vs4r.v v16, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>

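A note for readers of the truncstore hunks above: the RISC-V V specification
defines vncvt.x.x.w vd, vs, vm as an alias for vnsrl.wx vd, vs, x0, vm, so the
updated CHECK lines match the same instruction encodings, now printed in the
preferred pseudoinstruction form. A minimal sketch (register choices are
illustrative only; both forms assemble identically under
llvm-mc -triple=riscv64 -mattr=+v):

    vsetvli a1, zero, e32, mf2, ta, mu
    vnsrl.wx    v8, v8, zero    # narrowing shift right by x0, i.e. by 0
    vncvt.x.x.w v8, v8          # spec alias for the line above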
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
index 210056591cf70..86252ff093a3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI0_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -40,7 +40,7 @@ define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI1_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -63,7 +63,7 @@ define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI2_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -86,7 +86,7 @@ define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI3_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -109,7 +109,7 @@ define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI4_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -132,7 +132,7 @@ define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI5_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
@@ -155,7 +155,7 @@ define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI6_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -178,7 +178,7 @@ define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI7_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -201,7 +201,7 @@ define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI8_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -224,7 +224,7 @@ define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI9_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -247,7 +247,7 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
@@ -270,7 +270,7 @@ define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -293,7 +293,7 @@ define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -316,7 +316,7 @@ define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI13_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -339,7 +339,7 @@ define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI14_1)(a0)
 ; CHECK-NEXT:    vfadd.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0

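Similarly for the floating-point hunks: vfabs.v vd, vs, vm is the
specification's alias for vfsgnjx.vv vd, vs, vs, vm (xor-ing a value's sign
bit with itself clears it, yielding the absolute value). A minimal sketch,
with illustrative registers:

    vsetvli a1, zero, e16, mf4, ta, mu
    vfsgnjx.vv v10, v8, v8    # |v8|: sign bit becomes sign(v8) xor sign(v8) = 0
    vfabs.v    v10, v8        # spec alias for the line above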
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
index 92e2deb5dee82..a261887305016 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI0_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -40,7 +40,7 @@ define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI1_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -63,7 +63,7 @@ define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI2_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -86,7 +86,7 @@ define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI3_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -109,7 +109,7 @@ define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI4_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -132,7 +132,7 @@ define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI5_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
@@ -155,7 +155,7 @@ define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI6_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -178,7 +178,7 @@ define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI7_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -201,7 +201,7 @@ define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI8_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -224,7 +224,7 @@ define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI9_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -247,7 +247,7 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
@@ -270,7 +270,7 @@ define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -293,7 +293,7 @@ define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v12, v10, ft0
 ; CHECK-NEXT:    vmerge.vvm v10, v10, v12, v0
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft1
 ; CHECK-NEXT:    vfsgnj.vv v10, v10, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
@@ -316,7 +316,7 @@ define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI13_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v16, v12, ft0
 ; CHECK-NEXT:    vmerge.vvm v12, v12, v16, v0
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft1
 ; CHECK-NEXT:    vfsgnj.vv v12, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
@@ -339,7 +339,7 @@ define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI14_1)(a0)
 ; CHECK-NEXT:    vfsub.vf v24, v16, ft0
 ; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    vfsgnjx.vv v24, v8, v8
+; CHECK-NEXT:    vfabs.v v24, v8
 ; CHECK-NEXT:    vmflt.vf v0, v24, ft1
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index 9daaf921251cf..4f141e44f8ac3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -21,7 +21,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 4
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
 ; CHECK-NEXT:    li a1, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a1
@@ -47,7 +47,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX8-NEXT:    li a1, 134
 ; LMULMAX8-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-NEXT:    vrsub.vx v8, v9, a1
@@ -75,7 +75,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 8
 ; LMULMAX2-RV32I-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT:    lui a1, 5
 ; LMULMAX2-RV32I-NEXT:    addi a1, a1, 1365
@@ -110,7 +110,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 8
 ; LMULMAX2-RV64I-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT:    lui a1, 5
 ; LMULMAX2-RV64I-NEXT:    addiw a1, a1, 1365
@@ -145,7 +145,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 8
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a1, 5
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, 1365
@@ -180,7 +180,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 8
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a1, 5
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, 1365
@@ -267,7 +267,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX2-RV32I-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT:    lui a1, 349525
 ; LMULMAX2-RV32I-NEXT:    addi a1, a1, 1365
@@ -305,7 +305,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX2-RV64I-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT:    lui a1, 349525
 ; LMULMAX2-RV64I-NEXT:    addiw a1, a1, 1365
@@ -343,7 +343,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a1, 349525
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, 1365
@@ -381,7 +381,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a1, 349525
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, 1365
@@ -532,7 +532,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX2-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX2-RV64-NEXT:    lui a2, %hi(.LCPI3_1)
@@ -631,7 +631,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    li a1, 32
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX1-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI3_1)
@@ -730,7 +730,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX8-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
 ; LMULMAX8-RV64-NEXT:    lui a2, %hi(.LCPI3_1)
@@ -774,7 +774,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-NEXT:    vsrl.vi v10, v8, 4
 ; LMULMAX2-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-NEXT:    vnot.v v8, v8
 ; LMULMAX2-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-NEXT:    li a1, 85
 ; LMULMAX2-NEXT:    vand.vx v10, v10, a1
@@ -802,7 +802,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-NEXT:    vsrl.vi v10, v8, 4
 ; LMULMAX1-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-NEXT:    vnot.v v8, v8
 ; LMULMAX1-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-NEXT:    li a2, 85
 ; LMULMAX1-NEXT:    vand.vx v10, v10, a2
@@ -821,7 +821,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-NEXT:    vsrl.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-NEXT:    vnot.v v9, v9
 ; LMULMAX1-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-NEXT:    vsub.vv v9, v9, v10
@@ -847,7 +847,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v10, v12, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX8-NEXT:    li a1, 134
 ; LMULMAX8-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-NEXT:    vrsub.vx v8, v10, a1
@@ -875,7 +875,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 8
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT:    lui a1, 5
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, 1365
@@ -910,7 +910,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 8
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT:    lui a1, 5
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, 1365
@@ -947,7 +947,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 8
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a2, 5
 ; LMULMAX1-RV32-NEXT:    addi a2, a2, 1365
@@ -975,7 +975,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 8
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT:    vsub.vv v9, v9, v10
@@ -1006,7 +1006,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 8
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a2, 5
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, 1365
@@ -1034,7 +1034,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 8
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT:    vsub.vv v9, v9, v10
@@ -1087,7 +1087,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT:    lui a1, 349525
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, 1365
@@ -1125,7 +1125,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT:    lui a1, 349525
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, 1365
@@ -1165,7 +1165,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a2, 349525
 ; LMULMAX1-RV32-NEXT:    addi a2, a2, 1365
@@ -1196,7 +1196,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 16
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-RV32-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT:    vsub.vv v9, v9, v10
@@ -1229,7 +1229,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a2, 349525
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, 1365
@@ -1260,7 +1260,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 16
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT:    vsub.vv v9, v9, v10
@@ -1374,7 +1374,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    lui a1, %hi(.LCPI7_0)
 ; LMULMAX2-RV64-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
 ; LMULMAX2-RV64-NEXT:    lui a2, %hi(.LCPI7_1)
@@ -1503,7 +1503,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    li a2, 32
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    lui a3, %hi(.LCPI7_0)
 ; LMULMAX1-RV64-NEXT:    ld a3, %lo(.LCPI7_0)(a3)
 ; LMULMAX1-RV64-NEXT:    lui a4, %hi(.LCPI7_1)
@@ -1537,7 +1537,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v9, a2
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX1-RV64-NEXT:    vsub.vv v9, v9, v10
@@ -1628,7 +1628,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT:    lui a1, %hi(.LCPI7_0)
 ; LMULMAX8-RV64-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
 ; LMULMAX8-RV64-NEXT:    lui a2, %hi(.LCPI7_1)

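And for the integer hunks in the ctlz/cttz tests: vnot.v vd, vs, vm is the
specification's alias for vxor.vi vd, vs, -1, vm (xor with all-ones
complements every bit). A minimal sketch, with illustrative registers:

    vsetivli zero, 16, e8, m1, ta, mu
    vxor.vi v8, v8, -1    # bitwise complement via xor with -1
    vnot.v  v8, v8        # spec alias for the line above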
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index b4c51f1da83b3..ed58efa3e391d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -17,7 +17,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    vsub.vx v9, v8, a1
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
 ; CHECK-NEXT:    li a1, 85
@@ -46,7 +46,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX8-NEXT:    li a1, 127
 ; LMULMAX8-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-NEXT:    vsub.vx v8, v9, a1
@@ -68,7 +68,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV32I-NEXT:    li a1, 1
 ; LMULMAX2-RV32I-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT:    lui a1, 5
@@ -98,7 +98,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV64I-NEXT:    li a1, 1
 ; LMULMAX2-RV64I-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT:    lui a1, 5
@@ -128,7 +128,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vle16.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a1, 5
@@ -158,7 +158,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle16.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a1, 5
@@ -244,7 +244,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV32I-NEXT:    li a1, 1
 ; LMULMAX2-RV32I-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32I-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV32I-NEXT:    lui a1, 349525
@@ -275,7 +275,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV64I-NEXT:    li a1, 1
 ; LMULMAX2-RV64I-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64I-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64I-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX2-RV64I-NEXT:    lui a1, 349525
@@ -306,7 +306,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a1, 349525
@@ -337,7 +337,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a1, 349525
@@ -474,7 +474,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX2-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
@@ -553,7 +553,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX1-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
@@ -632,7 +632,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vle64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    li a1, 1
 ; LMULMAX8-RV64-NEXT:    vsub.vx v9, v8, a1
-; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX8-RV64-NEXT:    lui a1, %hi(.LCPI3_0)
 ; LMULMAX8-RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
@@ -673,7 +673,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    li a1, 1
 ; LMULMAX2-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-NEXT:    vnot.v v8, v8
 ; LMULMAX2-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-NEXT:    li a1, 85
@@ -698,7 +698,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-NEXT:    vle8.v v9, (a0)
 ; LMULMAX1-NEXT:    li a2, 1
 ; LMULMAX1-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-NEXT:    vnot.v v8, v8
 ; LMULMAX1-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-NEXT:    li a3, 85
@@ -713,7 +713,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-NEXT:    vadd.vv v8, v8, v10
 ; LMULMAX1-NEXT:    vand.vi v8, v8, 15
 ; LMULMAX1-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-NEXT:    vnot.v v9, v9
 ; LMULMAX1-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-NEXT:    vand.vx v10, v10, a3
@@ -742,7 +742,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; LMULMAX8-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v10, v12, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX8-NEXT:    li a1, 127
 ; LMULMAX8-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-NEXT:    vsub.vx v8, v10, a1
@@ -764,7 +764,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT:    lui a1, 5
@@ -794,7 +794,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT:    lui a1, 5
@@ -826,7 +826,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vle16.v v9, (a0)
 ; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a3, 5
@@ -848,7 +848,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a6
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-RV32-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a3
@@ -874,7 +874,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle16.v v9, (a0)
 ; LMULMAX1-RV64-NEXT:    li a2, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a3, 5
@@ -896,7 +896,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a6
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
@@ -944,7 +944,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV32-NEXT:    lui a1, 349525
@@ -975,7 +975,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX2-RV64-NEXT:    lui a1, 349525
@@ -1008,7 +1008,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vle32.v v9, (a0)
 ; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV32-NEXT:    lui a3, 349525
@@ -1031,7 +1031,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a6
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 24
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-RV32-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV32-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV32-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a3
@@ -1057,7 +1057,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle32.v v9, (a0)
 ; LMULMAX1-RV64-NEXT:    li a2, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
 ; LMULMAX1-RV64-NEXT:    lui a3, 349525
@@ -1080,7 +1080,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a6
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 24
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
@@ -1176,7 +1176,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX2-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    lui a1, %hi(.LCPI7_0)
 ; LMULMAX2-RV64-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
@@ -1275,7 +1275,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vle64.v v9, (a0)
 ; LMULMAX1-RV64-NEXT:    li a2, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    lui a3, %hi(.LCPI7_0)
 ; LMULMAX1-RV64-NEXT:    ld a3, %lo(.LCPI7_0)(a3)
@@ -1299,7 +1299,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    li a7, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a7
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v9, a2
-; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
+; LMULMAX1-RV64-NEXT:    vnot.v v9, v9
 ; LMULMAX1-RV64-NEXT:    vand.vv v9, v9, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
@@ -1370,7 +1370,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vle64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    li a1, 1
 ; LMULMAX8-RV64-NEXT:    vsub.vx v10, v8, a1
-; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
+; LMULMAX8-RV64-NEXT:    vnot.v v8, v8
 ; LMULMAX8-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX8-RV64-NEXT:    lui a1, %hi(.LCPI7_0)
 ; LMULMAX8-RV64-NEXT:    ld a1, %lo(.LCPI7_0)(a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index f74b73583fc89..f2fd6e1d0d3a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -573,7 +573,7 @@ define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i16_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i16> %x to <2 x i8>
@@ -637,7 +637,7 @@ define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v4i16_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i16> %x to <4 x i8>
@@ -723,7 +723,7 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, <8 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v8i16_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <8 x i16> %x to <8 x i8>
@@ -847,8 +847,8 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i16_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -857,7 +857,7 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i16_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vse8.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i16> %x to <16 x i8>
@@ -1031,9 +1031,9 @@ define void @truncstore_v2i32_v2i8(<2 x i32> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i32> %x to <2 x i8>
@@ -1045,7 +1045,7 @@ define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v2i32_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i32> %x to <2 x i16>
@@ -1083,9 +1083,9 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v4i32_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i32> %x to <4 x i8>
@@ -1097,7 +1097,7 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, <4 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v4i32_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <4 x i32> %x to <4 x i16>
@@ -1157,13 +1157,13 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i32_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1172,9 +1172,9 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i32_v8i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i32> %x to <8 x i8>
@@ -1186,8 +1186,8 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i32_v8i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1196,7 +1196,7 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i32_v8i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vse16.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i32> %x to <8 x i16>
@@ -1270,25 +1270,25 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i32_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1297,9 +1297,9 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i32_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v12
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i32> %x to <16 x i8>
@@ -1311,13 +1311,13 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i32_v16i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1328,7 +1328,7 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i32_v16i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vse16.v v12, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i32> %x to <16 x i16>
@@ -1436,11 +1436,11 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, <2 x i8>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i8>
@@ -1452,9 +1452,9 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, <2 x i16>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i16>
@@ -1466,7 +1466,7 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, <2 x i32>* %z) {
 ; CHECK-LABEL: truncstore_v2i64_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <2 x i64> %x to <2 x i32>
@@ -1478,17 +1478,17 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1497,11 +1497,11 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i8>
@@ -1513,13 +1513,13 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1528,9 +1528,9 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v10, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i16>
@@ -1542,8 +1542,8 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
@@ -1552,7 +1552,7 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX4-NEXT:    vse32.v v10, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <4 x i64> %x to <4 x i32>
@@ -1564,33 +1564,33 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1599,11 +1599,11 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v12
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i8>
@@ -1615,25 +1615,25 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
@@ -1642,9 +1642,9 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v12
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i16>
@@ -1656,13 +1656,13 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1673,7 +1673,7 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <8 x i64> %x to <8 x i32>
@@ -1685,65 +1685,65 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 10
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v14, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v14
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 12
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 14
 ; LMULMAX1-NEXT:    vse8.v v8, (a0)
@@ -1752,17 +1752,17 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v16, v12
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v16, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v16
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v12
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v14, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v14, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v14, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v14
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
@@ -1776,47 +1776,47 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 6
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v14, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v14
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 6
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -1827,13 +1827,13 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v16, v12
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v12, v16, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v16
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v14, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v14, v8
 ; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v8, v14, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v8, v14
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
@@ -1847,23 +1847,23 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v13, zero
-; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v13
+; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v11, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v15, zero
-; LMULMAX1-NEXT:    vnsrl.wx v12, v14, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v15
+; LMULMAX1-NEXT:    vncvt.x.x.w v12, v14
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v12, v9, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 48
@@ -1878,8 +1878,8 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX4-NEXT:    vnsrl.wx v16, v12, zero
-; LMULMAX4-NEXT:    vnsrl.wx v12, v8, zero
+; LMULMAX4-NEXT:    vncvt.x.x.w v16, v12
+; LMULMAX4-NEXT:    vncvt.x.x.w v12, v8
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v12, v16, 8
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)

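A note on the vncvt.x.x.w rewrites above: the RISC-V V specification defines vncvt.x.x.w vd, vs, vm as an assembler alias for vnsrl.wx vd, vs, x0, vm, so every rewritten CHECK line denotes the same encoding as before; only the printed form changes. A minimal sketch of the equivalence (register choices here are illustrative, not taken from any particular test above):

    # Both lines assemble to the identical instruction:
    vnsrl.wx    v8, v16, zero   # narrowing shift right by x0, i.e. integer truncate
    vncvt.x.x.w v8, v16         # spec-defined alias, the preferred printed form
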
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 1f89dd788d01d..88af4a90c13cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -201,7 +201,7 @@ define void @fneg_v8f16(<8 x half>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -215,7 +215,7 @@ define void @fneg_v4f32(<4 x float>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -229,7 +229,7 @@ define void @fneg_v2f64(<2 x double>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -243,7 +243,7 @@ define void @fabs_v8f16(<8 x half>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -258,7 +258,7 @@ define void @fabs_v4f32(<4 x float>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -273,7 +273,7 @@ define void @fabs_v2f64(<2 x double>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -1191,7 +1191,7 @@ define void @fneg_v16f16(<16 x half>* %x) {
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-NEXT:    vle16.v v8, (a0)
-; LMULMAX2-NEXT:    vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT:    vfneg.v v8, v8
 ; LMULMAX2-NEXT:    vse16.v v8, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -1201,8 +1201,8 @@ define void @fneg_v16f16(<16 x half>* %x) {
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle16.v v8, (a1)
 ; LMULMAX1-NEXT:    vle16.v v9, (a0)
-; LMULMAX1-NEXT:    vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT:    vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT:    vfneg.v v8, v8
+; LMULMAX1-NEXT:    vfneg.v v9, v9
 ; LMULMAX1-NEXT:    vse16.v v9, (a0)
 ; LMULMAX1-NEXT:    vse16.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -1217,7 +1217,7 @@ define void @fneg_v8f32(<8 x float>* %x) {
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-NEXT:    vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT:    vfneg.v v8, v8
 ; LMULMAX2-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -1227,8 +1227,8 @@ define void @fneg_v8f32(<8 x float>* %x) {
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v8, (a1)
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-NEXT:    vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT:    vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT:    vfneg.v v8, v8
+; LMULMAX1-NEXT:    vfneg.v v9, v9
 ; LMULMAX1-NEXT:    vse32.v v9, (a0)
 ; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -1243,7 +1243,7 @@ define void @fneg_v4f64(<4 x double>* %x) {
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-NEXT:    vfsgnjn.vv v8, v8, v8
+; LMULMAX2-NEXT:    vfneg.v v8, v8
 ; LMULMAX2-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -1253,8 +1253,8 @@ define void @fneg_v4f64(<4 x double>* %x) {
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v8, (a1)
 ; LMULMAX1-NEXT:    vle64.v v9, (a0)
-; LMULMAX1-NEXT:    vfsgnjn.vv v8, v8, v8
-; LMULMAX1-NEXT:    vfsgnjn.vv v9, v9, v9
+; LMULMAX1-NEXT:    vfneg.v v8, v8
+; LMULMAX1-NEXT:    vfneg.v v9, v9
 ; LMULMAX1-NEXT:    vse64.v v9, (a0)
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    ret
@@ -1971,7 +1971,7 @@ define void @trunc_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI91_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI91_0)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -1993,7 +1993,7 @@ define void @trunc_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI92_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI92_0)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -2015,7 +2015,7 @@ define void @trunc_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI93_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI93_0)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -2044,7 +2044,7 @@ define void @ceil_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI94_1)(a1)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2071,7 +2071,7 @@ define void @ceil_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI95_1)(a1)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2098,7 +2098,7 @@ define void @ceil_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI96_1)(a1)
 ; CHECK-NEXT:    vfadd.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2125,7 +2125,7 @@ define void @floor_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI97_1)(a1)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2152,7 +2152,7 @@ define void @floor_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI98_1)(a1)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2179,7 +2179,7 @@ define void @floor_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI99_1)(a1)
 ; CHECK-NEXT:    vfsub.vf v10, v9, ft0
 ; CHECK-NEXT:    vmerge.vvm v9, v9, v10, v0
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft1
 ; CHECK-NEXT:    vfsgnj.vv v9, v9, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
@@ -2201,7 +2201,7 @@ define void @round_v8f16(<8 x half>* %x) {
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI100_0)(a1)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI100_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI100_1)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -2226,7 +2226,7 @@ define void @round_v4f32(<4 x float>* %x) {
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI101_0)(a1)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI101_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI101_1)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -2251,7 +2251,7 @@ define void @round_v2f64(<2 x double>* %x) {
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI102_0)(a1)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI102_1)
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI102_1)(a1)
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9

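The vfneg.v and vfabs.v updates in fixed-vectors-fp.ll follow the same pattern: the V specification defines vfneg.v vd, vs as an alias for vfsgnjn.vv vd, vs, vs, and vfabs.v vd, vs as an alias for vfsgnjx.vv vd, vs, vs. A short sketch (registers illustrative only):

    # Sign-injection forms and their spec-defined aliases, same encodings:
    vfsgnjn.vv v8, v8, v8    # negate: result takes the inverted sign of v8
    vfneg.v    v8, v8        # alias for the line above
    vfsgnjx.vv v9, v8, v8    # absolute value: sign bit XORed with itself, cleared
    vfabs.v    v9, v8        # alias for the line above
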
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 1c6b5099eb017..566fcdbc2ca9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -348,9 +348,9 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -367,9 +367,9 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -410,9 +410,9 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v8, v12, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v8, v12
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -429,31 +429,31 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v11, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)
@@ -472,9 +472,9 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v8, v12, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v8, v12
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -491,31 +491,31 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v10, v10, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v11, v12, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v11, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v11, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v11, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v11, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
 ; LMULMAX1-NEXT:    vse8.v v10, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 59b4ba7fa8058..ca6f78dce853d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -169,9 +169,9 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
@@ -186,9 +186,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT:    vnsrl.wx v8, v10, zero
+; LMULMAX8-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX8-NEXT:    vse8.v v8, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
@@ -197,9 +197,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX2-NEXT:    vnsrl.wx v10, v8, zero
+; LMULMAX2-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX2-NEXT:    vnsrl.wx v8, v10, zero
+; LMULMAX2-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX2-NEXT:    vse8.v v8, (a1)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -210,13 +210,13 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v8, v8, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vnsrl.wx v9, v9, zero
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
 ; LMULMAX1-NEXT:    vse8.v v8, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index a0e494fff4775..ab10e53485a3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -6493,7 +6493,7 @@ define void @xor_vi_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
@@ -6509,7 +6509,7 @@ define void @xor_vi_v8i16(<8 x i16>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
@@ -6525,7 +6525,7 @@ define void @xor_vi_v4i32(<4 x i32>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
@@ -6541,7 +6541,7 @@ define void @xor_vi_v2i64(<2 x i64>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x

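Likewise for the vnot.v lines in fixed-vectors-int.ll above: the V specification defines vnot.v vd, vs, vm as an alias for vxor.vi vd, vs, -1, vm. Sketch (registers illustrative only):

    # Bitwise complement, written either way, assembles identically:
    vxor.vi v8, v8, -1    # XOR with the all-ones immediate
    vnot.v  v8, v8        # spec-defined alias, the preferred printed form
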
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index ca45a3d09d291..35cb5f7a16485 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1032,7 +1032,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1059,7 +1059,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1110,7 +1110,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1137,7 +1137,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1187,7 +1187,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1214,7 +1214,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1240,7 +1240,7 @@ define <8 x i64> @mgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1897,7 +1897,7 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %id
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1924,7 +1924,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %id
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -1975,7 +1975,7 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2002,7 +2002,7 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2052,7 +2052,7 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2079,7 +2079,7 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -2105,7 +2105,7 @@ define <8 x double> @mgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v12, (a0), v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 000f1a397b4d9..bc1f4b02d2193 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -44,14 +44,14 @@ define void @mscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i16_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i16_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i16> %val to <2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i32_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i32_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i32> %val to <2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i8>
@@ -232,14 +232,14 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i32_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i32_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i32> %val to <2 x i16>
@@ -251,18 +251,18 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i16>
@@ -466,14 +466,14 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <
 ; RV32-LABEL: mscatter_v2i64_truncstore_v2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_v2i64_truncstore_v2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <2 x i64> %val to <2 x i32>
@@ -827,7 +827,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -852,7 +852,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -899,7 +899,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -924,7 +924,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -970,7 +970,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -995,7 +995,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1019,7 +1019,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1609,7 +1609,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1634,7 +1634,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1681,7 +1681,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1706,7 +1706,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1752,7 +1752,7 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1777,7 +1777,7 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1801,7 +1801,7 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
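
For reference, the two spellings checked in the hunks above encode the same instruction: in RVV assembly, vncvt.x.x.w vd, vs is the alias for vnsrl.wx vd, vs, x0, a narrowing shift by zero bits. A minimal standalone sketch, not taken from this patch and with illustrative register choices; it should assemble with llvm-mc once the vector extension is enabled (e.g. -mattr=+v):

    vsetvli zero, zero, e32, m2, ta, mu
    vnsrl.wx    v16, v12, zero    # narrowing shift right by x0 (zero)
    vncvt.x.x.w v16, v12          # same encoding, alias spelling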

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
index 28a8dc9ac546c..c59d50cd15250 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -20,7 +20,7 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -44,7 +44,7 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -68,7 +68,7 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -92,7 +92,7 @@ define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vfneg_vv_v16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -116,7 +116,7 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v2f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -140,7 +140,7 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v4f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -164,7 +164,7 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vfneg_vv_v8f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -188,7 +188,7 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-LABEL: vfneg_vv_v16f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vfneg_vv_v2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
@@ -236,7 +236,7 @@ define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vfneg_vv_v4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
@@ -260,7 +260,7 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-LABEL: vfneg_vv_v8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
@@ -284,7 +284,7 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
@@ -308,7 +308,7 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-LABEL: vfneg_vv_v16f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
@@ -356,13 +356,13 @@ define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:  .LBB27_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT:    li a1, 16
-; CHECK-NEXT:    vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT:    vfneg.v v16, v16
 ; CHECK-NEXT:    bltu a0, a1, .LBB27_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:  .LBB27_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
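
The vfneg.v lines above are likewise an alias form: vfneg.v vd, vs expands to vfsgnjn.vv vd, vs, vs, which copies vs with its sign bit inverted. A short hedged example under the same assumptions as the earlier sketch:

    vsetvli zero, a0, e64, m8, ta, mu
    vfsgnjn.vv v8, v8, v8    # negate: copy v8 with the sign bit flipped
    vfneg.v    v8, v8        # same encoding, alias spelling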

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 4adf754acc909..56c1da15c6384 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
 ; RV32-NEXT:    vsext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -991,7 +991,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
 ; RV32-NEXT:    vzext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1040,7 +1040,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vsext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1066,7 +1066,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
 ; RV32-NEXT:    vzext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1114,7 +1114,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vsext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1140,7 +1140,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
 ; RV32-NEXT:    vzext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1165,7 +1165,7 @@ define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1702,7 +1702,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %i
 ; RV32-NEXT:    vsext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1728,7 +1728,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %i
 ; RV32-NEXT:    vzext.vf8 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1777,7 +1777,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16>
 ; RV32-NEXT:    vsext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1803,7 +1803,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16>
 ; RV32-NEXT:    vzext.vf4 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1851,7 +1851,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32>
 ; RV32-NEXT:    vsext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1877,7 +1877,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32>
 ; RV32-NEXT:    vzext.vf2 v12, v8
 ; RV32-NEXT:    vsll.vi v8, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -1902,7 +1902,7 @@ define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v12, v0.t
 ; RV32-NEXT:    ret
@@ -2055,7 +2055,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v16
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v10, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2068,7 +2068,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v24, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v24
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v10
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2128,7 +2128,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v12, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v12, v16
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v10, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2141,7 +2141,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v24, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v24
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v10
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2265,7 +2265,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v16
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v12, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2278,7 +2278,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v24, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v24
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2338,7 +2338,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v16
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v12, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2351,7 +2351,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v24, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v24, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v24
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v12
 ; RV32-NEXT:    vluxei32.v v8, (a0), v4, v0.t
@@ -2474,7 +2474,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v8
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v1, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2487,7 +2487,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
 ; RV32-NEXT:    vluxei32.v v8, (a0), v24, v0.t
@@ -2547,7 +2547,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v4, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v4, v8
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v1, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2560,7 +2560,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
 ; RV32-NEXT:    vluxei32.v v8, (a0), v24, v0.t
@@ -2616,7 +2616,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v28, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v28, v16
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v24, 2
 ; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -2629,7 +2629,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v28, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v28, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v24
 ; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index cbc916b136de3..53e34f3735c2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,7 +26,7 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -34,7 +34,7 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -47,9 +47,9 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -57,9 +57,9 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -72,11 +72,11 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -84,11 +84,11 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -194,7 +194,7 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -202,7 +202,7 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -215,9 +215,9 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -225,9 +225,9 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -442,7 +442,7 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
 ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -450,7 +450,7 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
 ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
 ; RV64-NEXT:    ret
@@ -779,7 +779,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -805,7 +805,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -854,7 +854,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -880,7 +880,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -928,7 +928,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -954,7 +954,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -979,7 +979,7 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1500,7 +1500,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vsext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1526,7 +1526,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
 ; RV32-NEXT:    vzext.vf8 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1575,7 +1575,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vsext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1601,7 +1601,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vzext.vf4 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1649,7 +1649,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vsext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1675,7 +1675,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base
 ; RV32-NEXT:    vzext.vf2 v16, v12
 ; RV32-NEXT:    vsll.vi v12, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1700,7 +1700,7 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v12, v12, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v12, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v12
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1924,7 +1924,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, a2, -16
 ; RV32-NEXT:    addi a4, sp, 16
@@ -1937,7 +1937,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v0, 2
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
@@ -2058,7 +2058,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a1, a2, -16
 ; RV32-NEXT:    addi a4, sp, 16
@@ -2071,7 +2071,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v0, 2
 ; RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
index 99a5e3883183c..4b571347898c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -120,7 +120,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
@@ -222,7 +222,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
@@ -324,7 +324,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
@@ -426,7 +426,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
@@ -528,7 +528,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
@@ -630,7 +630,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
@@ -732,7 +732,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
@@ -834,7 +834,7 @@ define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
@@ -936,7 +936,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
@@ -1038,7 +1038,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
@@ -1140,7 +1140,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -1242,7 +1242,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
@@ -1344,7 +1344,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
@@ -1474,7 +1474,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v2i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
@@ -1604,7 +1604,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v4i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
@@ -1734,7 +1734,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v8i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
@@ -1864,7 +1864,7 @@ define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vxor_vi_v16i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
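
The same pattern covers the integer complement above: vnot.v vd, vs is the alias for vxor.vi vd, vs, -1, an xor with the all-ones immediate. Illustrative only, under the same assumptions:

    vsetvli zero, a0, e64, m8, ta, mu
    vxor.vi v8, v8, -1    # xor with all-ones flips every bit
    vnot.v  v8, v8        # same encoding, alias spelling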

diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
index a2acc9e34059e..c6dc78b2a7383 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI0_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -33,7 +33,7 @@ define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI1_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -54,7 +54,7 @@ define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI2_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -75,7 +75,7 @@ define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI3_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfadd.vf v10, v10, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
@@ -96,7 +96,7 @@ define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI4_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfadd.vf v12, v12, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
@@ -117,7 +117,7 @@ define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_1)
 ; CHECK-NEXT:    flh ft1, %lo(.LCPI5_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfadd.vf v16, v16, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
@@ -138,7 +138,7 @@ define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI6_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI6_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -159,7 +159,7 @@ define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI7_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI7_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -180,7 +180,7 @@ define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI8_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI8_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfadd.vf v10, v10, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
@@ -201,7 +201,7 @@ define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI9_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfadd.vf v12, v12, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
@@ -222,7 +222,7 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
 ; CHECK-NEXT:    flw ft1, %lo(.LCPI10_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfadd.vf v16, v16, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
@@ -243,7 +243,7 @@ define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_1)
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI11_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfadd.vf v9, v9, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v9
@@ -264,7 +264,7 @@ define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_1)
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI12_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfadd.vf v10, v10, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v10
@@ -285,7 +285,7 @@ define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_1)
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI13_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfadd.vf v12, v12, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v12
@@ -306,7 +306,7 @@ define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_1)
 ; CHECK-NEXT:    fld ft1, %lo(.LCPI14_1)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfadd.vf v16, v16, ft1
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
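
And for the rounding tests above: vfabs.v vd, vs is the alias for vfsgnjx.vv vd, vs, vs; xor-ing a value's sign bit with itself clears it, yielding the absolute value. One more hedged sketch under the same assumptions:

    vsetvli a0, zero, e16, m1, ta, mu
    vfsgnjx.vv v9, v8, v8    # sign xor sign = 0, so this is fabs
    vfabs.v    v9, v8        # same encoding, alias spelling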

diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
index 68814215036a2..80ff02524a809 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -28,7 +28,7 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -46,7 +46,7 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -64,7 +64,7 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI3_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10
@@ -82,7 +82,7 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12
@@ -100,7 +100,7 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI5_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16
@@ -118,7 +118,7 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -136,7 +136,7 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI7_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI7_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -154,7 +154,7 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10
@@ -172,7 +172,7 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI9_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12
@@ -190,7 +190,7 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16
@@ -208,7 +208,7 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI11_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9
@@ -226,7 +226,7 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10
@@ -244,7 +244,7 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI13_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI13_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12
@@ -262,7 +262,7 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
index 302174e5aadf5..0a0849fb12eb4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -6,9 +6,9 @@ define <vscale x 4 x i5> @trunc_nxv4i32_to_nxv4i5(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %v = trunc <vscale x 4 x i32> %a to <vscale x 4 x i5>
   ret <vscale x 4 x i5> %v
@@ -18,9 +18,9 @@ define <vscale x 1 x i5> @trunc_nxv1i32_to_nxv1i5(<vscale x 1 x i32> %a) {
 ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %v = trunc <vscale x 1 x i32> %a to <vscale x 1 x i5>
   ret <vscale x 1 x i5> %v

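Note on the vncvt.x.x.w lines above: vncvt.x.x.w is the spec's pseudoinstruction for a narrowing integer truncation, i.e. a narrowing shift-right by zero that keeps the low half of each 2*SEW source element. A minimal sketch (register names are illustrative):

    vnsrl.wx    v10, v8, zero   # shift each 2*SEW element right by 0, write SEW-wide result
    vncvt.x.x.w v10, v8         # pseudoinstruction for the same encoding

Each step halves SEW, so a truncation across more than one width takes a chain of vsetvli/vncvt.x.x.w pairs, as in the i32-to-i5 checks above.
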
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index a4f861068e2f1..ce0677422dae6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1038,7 +1038,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsca
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1065,7 +1065,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsca
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1143,7 +1143,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1193,7 +1193,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1220,7 +1220,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1246,7 +1246,7 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1969,7 +1969,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base,
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -1996,7 +1996,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base,
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2047,7 +2047,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2074,7 +2074,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2124,7 +2124,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2151,7 +2151,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16
@@ -2177,7 +2177,7 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8f64(double* %base, <vscale x 8
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 411094615c5f6..1bb7b0d82716a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@ define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v11, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v11
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v12, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v12
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -232,14 +232,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -251,18 +251,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v11, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v11
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v12, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v12
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -466,14 +466,14 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT:    ret
   %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
@@ -827,7 +827,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -852,7 +852,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -899,7 +899,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -924,7 +924,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -970,7 +970,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -995,7 +995,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1019,7 +1019,7 @@ define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsca
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1609,7 +1609,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1634,7 +1634,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1681,7 +1681,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1706,7 +1706,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1752,7 +1752,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1777,7 +1777,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1801,7 +1801,7 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
index b1d0d92a5ed7c..7fb0b39295462 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @vfabs_nxv1f16(<vscale x 1 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half> %v)
   ret <vscale x 1 x half> %r
@@ -22,7 +22,7 @@ define <vscale x 2 x half> @vfabs_nxv2f16(<vscale x 2 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %v)
   ret <vscale x 2 x half> %r
@@ -34,7 +34,7 @@ define <vscale x 4 x half> @vfabs_nxv4f16(<vscale x 4 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %v)
   ret <vscale x 4 x half> %r
@@ -46,7 +46,7 @@ define <vscale x 8 x half> @vfabs_nxv8f16(<vscale x 8 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %v)
   ret <vscale x 8 x half> %r
@@ -58,7 +58,7 @@ define <vscale x 16 x half> @vfabs_nxv16f16(<vscale x 16 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> %v)
   ret <vscale x 16 x half> %r
@@ -70,7 +70,7 @@ define <vscale x 32 x half> @vfabs_nxv32f16(<vscale x 32 x half> %v) {
 ; CHECK-LABEL: vfabs_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half> %v)
   ret <vscale x 32 x half> %r
@@ -82,7 +82,7 @@ define <vscale x 1 x float> @vfabs_nxv1f32(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vfabs_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> %v)
   ret <vscale x 1 x float> %r
@@ -94,7 +94,7 @@ define <vscale x 2 x float> @vfabs_nxv2f32(<vscale x 2 x float> %v) {
 ; CHECK-LABEL: vfabs_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %v)
   ret <vscale x 2 x float> %r
@@ -106,7 +106,7 @@ define <vscale x 4 x float> @vfabs_nxv4f32(<vscale x 4 x float> %v) {
 ; CHECK-LABEL: vfabs_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %v)
   ret <vscale x 4 x float> %r
@@ -118,7 +118,7 @@ define <vscale x 8 x float> @vfabs_nxv8f32(<vscale x 8 x float> %v) {
 ; CHECK-LABEL: vfabs_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> %v)
   ret <vscale x 8 x float> %r
@@ -130,7 +130,7 @@ define <vscale x 16 x float> @vfabs_nxv16f32(<vscale x 16 x float> %v) {
 ; CHECK-LABEL: vfabs_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> %v)
   ret <vscale x 16 x float> %r
@@ -142,7 +142,7 @@ define <vscale x 1 x double> @vfabs_nxv1f64(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: vfabs_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> %v)
   ret <vscale x 1 x double> %r
@@ -154,7 +154,7 @@ define <vscale x 2 x double> @vfabs_nxv2f64(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: vfabs_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %v)
   ret <vscale x 2 x double> %r
@@ -166,7 +166,7 @@ define <vscale x 4 x double> @vfabs_nxv4f64(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: vfabs_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %v)
   ret <vscale x 4 x double> %r
@@ -178,7 +178,7 @@ define <vscale x 8 x double> @vfabs_nxv8f64(<vscale x 8 x double> %v) {
 ; CHECK-LABEL: vfabs_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> %v)
   ret <vscale x 8 x double> %r

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
index 41411cc7f3790..8cceb5f399047 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -8,7 +8,7 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 1 x half> %va
   ret <vscale x 1 x half> %vb
@@ -18,7 +18,7 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 2 x half> %va
   ret <vscale x 2 x half> %vb
@@ -28,7 +28,7 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 4 x half> %va
   ret <vscale x 4 x half> %vb
@@ -38,7 +38,7 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 8 x half> %va
   ret <vscale x 8 x half> %vb
@@ -48,7 +48,7 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 16 x half> %va
   ret <vscale x 16 x half> %vb
@@ -58,7 +58,7 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 32 x half> %va
   ret <vscale x 32 x half> %vb
@@ -68,7 +68,7 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 1 x float> %va
   ret <vscale x 1 x float> %vb
@@ -78,7 +78,7 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 2 x float> %va
   ret <vscale x 2 x float> %vb
@@ -88,7 +88,7 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 4 x float> %va
   ret <vscale x 4 x float> %vb
@@ -98,7 +98,7 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 8 x float> %va
   ret <vscale x 8 x float> %vb
@@ -108,7 +108,7 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 16 x float> %va
   ret <vscale x 16 x float> %vb
@@ -118,7 +118,7 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 1 x double> %va
   ret <vscale x 1 x double> %vb
@@ -128,7 +128,7 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 2 x double> %va
   ret <vscale x 2 x double> %vb
@@ -138,7 +138,7 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 4 x double> %va
   ret <vscale x 4 x double> %vb
@@ -148,7 +148,7 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
 ; CHECK-LABEL: vfneg_vv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %vb = fneg <vscale x 8 x double> %va
   ret <vscale x 8 x double> %vb

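Note on the vfneg.v lines above: vfneg.v is the spec's pseudoinstruction for vector floating-point negation; sign-injection-NOT with both source operands equal flips the sign bit. A minimal sketch (register names are illustrative):

    vfsgnjn.vv v8, v8, v8   # sign := NOT sign(v8), i.e. -v8
    vfneg.v    v8, v8       # pseudoinstruction for the same encoding
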
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index cadddc0b5b14f..062a8ad327fa8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -20,7 +20,7 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; CHECK-LABEL: vfneg_vv_nxv1f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -44,7 +44,7 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; CHECK-LABEL: vfneg_vv_nxv2f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -68,7 +68,7 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; CHECK-LABEL: vfneg_vv_nxv4f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -92,7 +92,7 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; CHECK-LABEL: vfneg_vv_nxv8f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -116,7 +116,7 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; CHECK-LABEL: vfneg_vv_nxv16f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -140,7 +140,7 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; CHECK-LABEL: vfneg_vv_nxv32f16_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
@@ -164,7 +164,7 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -188,7 +188,7 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -236,7 +236,7 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -260,7 +260,7 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -284,7 +284,7 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -308,7 +308,7 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -332,7 +332,7 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -356,7 +356,7 @@ define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
@@ -380,7 +380,7 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -431,13 +431,13 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT:    sub a1, a0, a1
-; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    bltu a0, a1, .LBB33_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a3, a1
 ; CHECK-NEXT:  .LBB33_4:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, mu
-; CHECK-NEXT:    vfsgnjn.vv v16, v16, v16
+; CHECK-NEXT:    vfneg.v v16, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
index 7511cb7f13e53..680811037399b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -636,7 +636,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -648,7 +648,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -748,7 +748,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -760,7 +760,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -860,7 +860,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -872,7 +872,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -972,7 +972,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -984,7 +984,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1084,7 +1084,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 16 x float> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
@@ -1096,7 +1096,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 16 x float> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
@@ -1174,9 +1174,9 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -1188,9 +1188,9 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
@@ -1202,7 +1202,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
@@ -1214,7 +1214,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v9, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v9
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
@@ -1292,9 +1292,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -1306,9 +1306,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
@@ -1320,7 +1320,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
@@ -1410,9 +1410,9 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -1424,9 +1424,9 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
@@ -1450,7 +1450,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
@@ -1528,9 +1528,9 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1542,9 +1542,9 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
@@ -1556,7 +1556,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec
@@ -1568,7 +1568,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index 5a86efc5a6530..49ee7af0327c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1082,7 +1082,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsc
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1131,7 +1131,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1157,7 +1157,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1205,7 +1205,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1231,7 +1231,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vs
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1256,7 +1256,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i6
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1851,7 +1851,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i8_nxv6f64(double* %base
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1877,7 +1877,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(double* %base
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1926,7 +1926,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(double* %bas
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -1952,7 +1952,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i16_nxv6f64(double* %bas
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2000,7 +2000,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i32_nxv6f64(double* %bas
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2026,7 +2026,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i32_nxv6f64(double* %bas
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2051,7 +2051,7 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6f64(double* %base, <vscale x
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2117,7 +2117,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base
 ; RV32-NEXT:    vsext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2143,7 +2143,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base
 ; RV32-NEXT:    vzext.vf8 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2192,7 +2192,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %bas
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2218,7 +2218,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %bas
 ; RV32-NEXT:    vzext.vf4 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2266,7 +2266,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %bas
 ; RV32-NEXT:    vsext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2292,7 +2292,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %bas
 ; RV32-NEXT:    vzext.vf2 v16, v8
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2317,7 +2317,7 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8f64(double* %base, <vscale x
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
@@ -2466,7 +2466,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsext.vf4 v24, v10
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    srli a3, a2, 3
@@ -2480,7 +2480,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2536,7 +2536,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vzext.vf4 v24, v10
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    srli a3, a2, 3
@@ -2550,7 +2550,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
 ; RV32-NEXT:    ret

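Every RV32 hunk in vpgather-sdnode.ll has the same shape: the 64-bit index vector is narrowed to 32 bits by an unmasked vnsrl.wx whose scalar shift amount is x0, and the V specification defines vncvt.x.x.w vd, vs as exactly that shift-by-zero, so the two spellings encode identically. A minimal sketch of the equivalence (register choices are illustrative, not taken from any one test):

    # Same encoding either way; the unmasked form now prints as the alias.
    vnsrl.wx    v16, v8, zero    # narrowing shift right by x0
    vncvt.x.x.w v16, v8          # alias: truncate 2*SEW elements to SEW
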
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index d8000278c4fbf..c25944401e226 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -44,7 +44,7 @@ define void @vpscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -52,7 +52,7 @@ define void @vpscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -65,9 +65,9 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -75,9 +75,9 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -90,11 +90,11 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscal
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v11, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v11
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -102,11 +102,11 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscal
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v12, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v12
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -230,7 +230,7 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v8
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
 ; RV32-NEXT:    ret
@@ -238,7 +238,7 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v8
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -251,9 +251,9 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wx v8, v11, zero
+; RV32-NEXT:    vncvt.x.x.w v8, v11
 ; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -261,9 +261,9 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wx v8, v12, zero
+; RV64-NEXT:    vncvt.x.x.w v8, v12
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -460,7 +460,7 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vsca
 ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wx v11, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v11, v8
 ; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; RV32-NEXT:    vsoxei32.v v11, (zero), v10, v0.t
 ; RV32-NEXT:    ret
@@ -468,7 +468,7 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vsca
 ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wx v12, v8, zero
+; RV64-NEXT:    vncvt.x.x.w v12, v8
 ; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; RV64-NEXT:    vsoxei64.v v12, (zero), v10, v0.t
 ; RV64-NEXT:    ret
@@ -815,7 +815,7 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -841,7 +841,7 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -890,7 +890,7 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -916,7 +916,7 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -964,7 +964,7 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -990,7 +990,7 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1015,7 +1015,7 @@ define void @vpscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsc
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1590,7 +1590,7 @@ define void @vpscatter_baseidx_sext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, d
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1616,7 +1616,7 @@ define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, d
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1665,7 +1665,7 @@ define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1691,7 +1691,7 @@ define void @vpscatter_baseidx_zext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1739,7 +1739,7 @@ define void @vpscatter_baseidx_sext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1765,7 +1765,7 @@ define void @vpscatter_baseidx_zext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1790,7 +1790,7 @@ define void @vpscatter_baseidx_nxv6f64(<vscale x 6 x double> %val, double* %base
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1855,7 +1855,7 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1881,7 +1881,7 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, d
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1930,7 +1930,7 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -1956,7 +1956,7 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2004,7 +2004,7 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2030,7 +2030,7 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2055,7 +2055,7 @@ define void @vpscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v16, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsoxei32.v v8, (a0), v24, v0.t
 ; RV32-NEXT:    ret
@@ -2232,7 +2232,7 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsext.vf4 v16, v26
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2248,7 +2248,7 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
@@ -2348,7 +2348,7 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vzext.vf4 v16, v26
 ; RV32-NEXT:    vsll.vi v8, v8, 3
 ; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v24, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v24, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
@@ -2364,7 +2364,7 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v16, 3
 ; RV32-NEXT:    vsetvli zero, a4, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wx v16, v8, zero
+; RV32-NEXT:    vncvt.x.x.w v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3

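One observation on the vpscatter hunks: only the unmasked narrowing shifts change spelling; instructions carrying a v0.t mask, such as the vsoxei stores above, print as before. For contrast, a masked narrowing shift by zero and its alias form (a sketch with illustrative registers, assuming the usual masked-alias syntax):

    vnsrl.wx    v16, v8, zero, v0.t   # masked narrowing shift by zero
    vncvt.x.x.w v16, v8, v0.t         # the masked alias keeps the mask operand
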
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index c8390e645c07c..312ecdd09d84d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -507,7 +507,7 @@ define <vscale x 2 x i32> @test_vsetvli_x0_x0(<vscale x 2 x i32>* %x, <vscale x
 ; CHECK-NEXT:  # %bb.1: # %if
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v10, (a1)
-; CHECK-NEXT:    vwadd.vx v8, v10, zero
+; CHECK-NEXT:    vwcvt.x.x.v v8, v10
 ; CHECK-NEXT:  .LBB9_2: # %if.end
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v8

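The single hunk above is the widening analogue: vwcvt.x.x.v vd, vs is the V specification's pseudoinstruction for vwadd.vx vd, vs, x0, i.e. a widening add of zero that sign-extends each element from SEW to 2*SEW. Sketch with illustrative registers:

    vwadd.vx    v8, v10, zero    # widening add of x0
    vwcvt.x.x.v v8, v10          # alias: sign-extend SEW -> 2*SEW
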
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
index c756f7b2223f7..0471d64c66bb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
@@ -6,7 +6,7 @@ define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -16,7 +16,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -26,7 +26,7 @@ define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -36,7 +36,7 @@ define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
@@ -47,7 +47,7 @@ define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
@@ -58,9 +58,9 @@ define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -70,7 +70,7 @@ define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %tvec
@@ -80,9 +80,9 @@ define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -92,7 +92,7 @@ define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %tvec
@@ -102,9 +102,9 @@ define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -114,7 +114,7 @@ define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
@@ -125,9 +125,9 @@ define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %tvec
@@ -137,7 +137,7 @@ define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
@@ -148,9 +148,9 @@ define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %tvec
@@ -160,7 +160,7 @@ define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
@@ -171,11 +171,11 @@ define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %tvec
@@ -185,9 +185,9 @@ define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %tvec
@@ -197,7 +197,7 @@ define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %tvec
@@ -207,11 +207,11 @@ define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %tvec
@@ -221,9 +221,9 @@ define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %tvec
@@ -233,7 +233,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
@@ -244,11 +244,11 @@ define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %tvec
@@ -258,9 +258,9 @@ define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v12, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %tvec
@@ -270,7 +270,7 @@ define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v12, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v12, v8
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
@@ -281,11 +281,11 @@ define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v10, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v10, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v10, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v10
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %tvec
@@ -295,9 +295,9 @@ define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v16, zero
+; CHECK-NEXT:    vncvt.x.x.w v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %tvec
@@ -307,7 +307,7 @@ define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wx v16, v8, zero
+; CHECK-NEXT:    vncvt.x.x.w v16, v8
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
   %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
index b63fde3305b03..0ff187779206f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -28,7 +28,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -87,7 +87,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -146,7 +146,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -205,7 +205,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -264,7 +264,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -323,7 +323,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
@@ -382,7 +382,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
   %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
@@ -441,7 +441,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -500,7 +500,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -559,7 +559,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -618,7 +618,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -677,7 +677,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
@@ -736,7 +736,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
   %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
@@ -795,7 +795,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
@@ -854,7 +854,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -913,7 +913,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -972,7 +972,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
@@ -1031,7 +1031,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
   %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -1103,7 +1103,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
@@ -1175,7 +1175,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -1247,7 +1247,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -1319,7 +1319,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer

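In the vxor tests the alias is the bitwise-complement pseudoinstruction: XOR with a splat of all-ones flips every bit, and the V specification spells vxor.vi vd, vs, -1 as vnot.v vd, vs. Sketch (illustrative registers):

    vxor.vi v8, v8, -1           # xor with the all-ones immediate
    vnot.v  v8, v8               # alias: bitwise NOT of each element
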
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index 2fd2c8aaaf3d7..906f1f6d6007c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -110,7 +110,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_unmasked_1(<vscale x 1 x i8> %va, i32 z
 ; CHECK-LABEL: vxor_vi_nxv1i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_unmasked_1(<vscale x 2 x i8> %va, i32 z
 ; CHECK-LABEL: vxor_vi_nxv2i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -314,7 +314,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_unmasked_1(<vscale x 4 x i8> %va, i32 z
 ; CHECK-LABEL: vxor_vi_nxv4i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -416,7 +416,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked_1(<vscale x 8 x i8> %va, i32 z
 ; CHECK-LABEL: vxor_vi_nxv8i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -518,7 +518,7 @@ define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked_1(<vscale x 15 x i8> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 15 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
@@ -620,7 +620,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_unmasked_1(<vscale x 16 x i8> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv16i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -722,7 +722,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_unmasked_1(<vscale x 32 x i8> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv32i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
@@ -824,7 +824,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_unmasked_1(<vscale x 64 x i8> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv64i8_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
@@ -938,7 +938,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_unmasked_1(<vscale x 1 x i16> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv1i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -1040,7 +1040,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_unmasked_1(<vscale x 2 x i16> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv2i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -1142,7 +1142,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_unmasked_1(<vscale x 4 x i16> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv4i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -1244,7 +1244,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_unmasked_1(<vscale x 8 x i16> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv8i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -1346,7 +1346,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_unmasked_1(<vscale x 16 x i16> %va,
 ; CHECK-LABEL: vxor_vi_nxv16i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
@@ -1448,7 +1448,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked_1(<vscale x 32 x i16> %va,
 ; CHECK-LABEL: vxor_vi_nxv32i16_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
@@ -1550,7 +1550,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_unmasked_1(<vscale x 1 x i32> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv1i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
@@ -1652,7 +1652,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_unmasked_1(<vscale x 2 x i32> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv2i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -1754,7 +1754,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_unmasked_1(<vscale x 4 x i32> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv4i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -1856,7 +1856,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_unmasked_1(<vscale x 8 x i32> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv8i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
@@ -1958,7 +1958,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_unmasked_1(<vscale x 16 x i32> %va,
 ; CHECK-LABEL: vxor_vi_nxv16i32_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -2088,7 +2088,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_unmasked_1(<vscale x 1 x i64> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv1i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
@@ -2218,7 +2218,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_unmasked_1(<vscale x 2 x i64> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv2i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -2348,7 +2348,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_unmasked_1(<vscale x 4 x i64> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv4i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -2478,7 +2478,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_unmasked_1(<vscale x 8 x i64> %va, i3
 ; CHECK-LABEL: vxor_vi_nxv8i64_unmasked_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer

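All of the vxor-vp.ll updates above are instances of the same rewrite: an
unmasked vxor.vi with immediate -1 is a bitwise NOT, so the printer now
selects the vnot.v alias. A minimal scalar C++ sketch of that identity
(illustrative only; one 16-bit element stands in for the whole vector):

    #include <cassert>
    #include <cstdint>

    // XOR with an all-ones value is bitwise NOT -- the identity behind
    // printing "vxor.vi vd, vs, -1" as "vnot.v vd, vs".
    int main() {
      uint16_t element = 0x1234;
      assert(static_cast<uint16_t>(element ^ 0xFFFFu) ==
             static_cast<uint16_t>(~element));
      return 0;
    }
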
diff  --git a/llvm/test/MC/RISCV/rvv/add.s b/llvm/test/MC/RISCV/rvv/add.s
index 17e84b00cce3d..1df1f64784c01 100644
--- a/llvm/test/MC/RISCV/rvv/add.s
+++ b/llvm/test/MC/RISCV/rvv/add.s
@@ -345,7 +345,7 @@ vwcvt.x.x.v v8, v4, v0.t
 # CHECK-UNKNOWN: 57 64 40 c4 <unknown>
 
 vwcvt.x.x.v v8, v4
-# CHECK-INST: vwadd.vx v8, v4, zero
+# CHECK-INST: vwcvt.x.x.v v8, v4
 # CHECK-ENCODING: [0x57,0x64,0x40,0xc6]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 64 40 c6 <unknown>
@@ -357,7 +357,7 @@ vwcvtu.x.x.v v8, v4, v0.t
 # CHECK-UNKNOWN: 57 64 40 c0 <unknown>
 
 vwcvtu.x.x.v v8, v4
-# CHECK-INST: vwaddu.vx v8, v4, zero
+# CHECK-INST: vwcvtu.x.x.v v8, v4
 # CHECK-ENCODING: [0x57,0x64,0x40,0xc2]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 64 40 c2 <unknown>

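The vwcvt.x.x.v and vwcvtu.x.x.v hunks are the widening analogue: a widening
add of scalar zero only sign- or zero-extends each element, which the cvt
mnemonics name directly. A scalar C++ sketch of that equivalence
(illustrative, not the real codegen):

    #include <cassert>
    #include <cstdint>

    int main() {
      int16_t narrow = -42;
      // vwadd.vx vd, vs, zero: widen each element, then add zero.
      int32_t widened = static_cast<int32_t>(narrow) + 0;
      assert(widened == -42);                       // same as vwcvt.x.x.v
      uint16_t unarrow = 0xBEEF;
      uint32_t uwidened = static_cast<uint32_t>(unarrow) + 0u;
      assert(uwidened == 0xBEEFu);                  // same as vwcvtu.x.x.v
      return 0;
    }
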
diff  --git a/llvm/test/MC/RISCV/rvv/xor.s b/llvm/test/MC/RISCV/rvv/xor.s
index 436d4505d7e99..c348d89add00d 100644
--- a/llvm/test/MC/RISCV/rvv/xor.s
+++ b/llvm/test/MC/RISCV/rvv/xor.s
@@ -51,7 +51,7 @@ vnot.v v8, v4, v0.t
 # CHECK-UNKNOWN: 57 b4 4f 2c <unknown>
 
 vnot.v v8, v4
-# CHECK-INST: vxor.vi v8, v4, -1
+# CHECK-INST: vnot.v v8, v4
 # CHECK-ENCODING: [0x57,0xb4,0x4f,0x2e]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Extension for Application Processors), 'Zve32x' or 'Zve64x' (Vector Extensions for Embedded Processors)
 # CHECK-UNKNOWN: 57 b4 4f 2e <unknown>

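Note that in both MC tests only the CHECK-INST spelling changes; the
CHECK-ENCODING and CHECK-UNKNOWN bytes are untouched. This is purely a change
in which alias the printer chooses, not in what the assembler accepts or
encodes.
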
diff  --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
index c85fd87d37732..2edac05afb89f 100644
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -867,8 +867,6 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
 
       IAPrinter IAP(CGA.Result->getAsString(), FlatAliasAsmString, NumMIOps);
 
-      bool CantHandle = false;
-
       unsigned MIOpNum = 0;
       for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
         // Skip over tied operands as they're not part of an alias declaration.
@@ -968,10 +966,9 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
           break;
         }
         case CodeGenInstAlias::ResultOperand::K_Reg:
-          // If this is zero_reg, something's playing tricks we're not
-          // equipped to handle.
           if (!CGA.ResultOperands[i].getRegister()) {
-            CantHandle = true;
+            IAP.addCond(std::string(formatv(
+                "AliasPatternCond::K_Reg, {0}::NoRegister", Namespace)));
             break;
           }
 
@@ -984,8 +981,6 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
         MIOpNum += RO.getMINumOperands();
       }
 
-      if (CantHandle) continue;
-
       std::vector<Record *> ReqFeatures;
       if (PassSubtarget) {
         // We only consider ReqFeatures predicates if PassSubtarget

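With CantHandle removed, a zero_reg result operand now emits an
AliasPatternCond::K_Reg condition against <Namespace>::NoRegister instead of
abandoning the alias. A hedged C++ sketch of how such a condition can be
evaluated (names and enum values are illustrative, not the real generated
RISCVGenAsmWriter.inc):

    #include <cstdio>

    // Stand-in register enum: NoRegister is 0, as in LLVM's generated
    // register enums; the V0 value here is purely illustrative.
    namespace RISCV { enum { NoRegister = 0, V0 = 69 }; }

    struct AliasPatternCond {
      enum CondKind { K_Reg /* , K_Imm, K_Feature, ... */ } Kind;
      unsigned Value;
    };

    static bool condMatches(const AliasPatternCond &C, unsigned OpReg) {
      switch (C.Kind) {
      case AliasPatternCond::K_Reg:
        // NoRegister compares like any other register value, so an operand
        // that never got a mask register (OpReg == 0) can now match.
        return OpReg == C.Value;
      }
      return false;
    }

    int main() {
      AliasPatternCond Unmasked{AliasPatternCond::K_Reg, RISCV::NoRegister};
      std::printf("unmasked operand matches: %d\n",
                  condMatches(Unmasked, RISCV::NoRegister)); // prints 1
      std::printf("masked operand matches: %d\n",
                  condMatches(Unmasked, RISCV::V0));         // prints 0
      return 0;
    }
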
