[llvm] ae3ab4f - [RISCV] Masked compares should use a tail agnostic policy.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 7 21:56:01 PDT 2021


Author: Craig Topper
Date: 2021-06-07T21:43:44-07:00
New Revision: ae3ab4f0ec6190ebc19775002b59a36c06848bf3

URL: https://github.com/llvm/llvm-project/commit/ae3ab4f0ec6190ebc19775002b59a36c06848bf3
DIFF: https://github.com/llvm/llvm-project/commit/ae3ab4f0ec6190ebc19775002b59a36c06848bf3.diff

LOG: [RISCV] Masked compares should use a tail agnostic policy.

Writes of a mask result are always tail agnostic.

Unfortunately, this seems to have made codegen worse. My best guess
is that the vsetvli was acting as a barrier that prevented some code
movement in the scheduler.
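
For illustration, taken from the intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16
diff below: the vsetvli that only toggled to tu disappears, but register
allocation now needs an extra whole-register move.

Before:
    vmv1r.v v25, v0
    vsetvli zero, a0, e16,mf4,ta,mu
    vmfeq.vv v0, v8, v9
    vsetvli zero, zero, e16,mf4,tu,mu
    vmfeq.vv v25, v9, v10, v0.t
    vmv1r.v v0, v25
    ret

After:
    vsetvli zero, a0, e16,mf4,ta,mu
    vmfeq.vv v25, v8, v9
    vmv1r.v v26, v0
    vmv1r.v v0, v25
    vmfeq.vv v26, v9, v10, v0.t
    vmv1r.v v0, v26
    ret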

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D103331

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 168fb2e6769fb..74bd0d4bbea82 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1486,6 +1486,7 @@ multiclass VPseudoBinaryM<VReg RetClass,
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                              Constraint>;
+    let ForceTailAgnostic = true in
     def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
                                                          Op2Class, Constraint>;
   }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
index 93aec3bec398f..e69d34f451be7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
index b703f8fb40d31..4591cc15aeb9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfeq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfeq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
index 6469700814954..306cf39b298e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
index 46a3cb7d61311..233d3faf5b10d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
index bc9f053358e47..af37e03c1e346 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
index 035d792e7f54d..56a658a94d77d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
index c2166b362e2ff..ba809c33743c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
index 578c948837ce4..b50ba187b9562 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
index 593c8fd4d19f7..710a2dcae980a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
index 25d30c63aef7a..4eafb5fe8dfb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmflt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmflt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
index fef9b46a134a6..e3e0ce4a03b40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1106,7 +1106,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1163,7 +1163,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1220,7 +1220,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK-NEXT:    sw a1, 12(sp)
 ; CHECK-NEXT:    fld ft0, 8(sp)
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
index 4a934766faf74..fecff398ff134 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
 define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
@@ -291,12 +291,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
@@ -343,12 +343,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
@@ -395,12 +395,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
@@ -447,12 +447,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
 define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
@@ -499,12 +499,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
 define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
@@ -551,12 +551,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
 define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
@@ -603,12 +603,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
 define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmfne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmfne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
@@ -658,7 +658,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -707,7 +707,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -756,7 +756,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -805,7 +805,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -854,7 +854,7 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -903,7 +903,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -952,7 +952,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1001,7 +1001,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1050,7 +1050,7 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.w.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1099,7 +1099,7 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1197,7 +1197,7 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    fmv.d.x ft0, a0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
index b6392257d717d..9e96f00d49cba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
index 42ec9ba3ce8c0..9e1fe1770d970 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmseq.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmseq.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

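For reference, a minimal standalone sketch of the IR pattern these tests exercise (the wrapper function name below is invented for illustration; the intrinsic name, operand order, and i64 VL operand follow the declarations quoted in the hunks above). The result here is a mask value, matching the tests, whose updated CHECK lines keep a single ta,mu vsetvli and no longer switch to tu,mu for the masked compare.

    declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
      <vscale x 1 x i1>,
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      <vscale x 1 x i1>,
      i64)

    ; Illustrative wrapper only; not taken from the test files.
    define <vscale x 1 x i1> @sketch_masked_vmseq(<vscale x 1 x i1> %maskedoff, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
    entry:
      ; The destination is a mask value: only the first %vl bits are defined,
      ; and the tail of the result register is treated as agnostic.
      %r = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
        <vscale x 1 x i1> %maskedoff,
        <vscale x 1 x i8> %a,
        <vscale x 1 x i8> %b,
        <vscale x 1 x i1> %mask,
        i64 %vl)
      ret <vscale x 1 x i1> %r
    }
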
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index dbe330c4c9881..2aca85b4f8f4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
@@ -969,10 +969,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1018,10 +1017,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1067,10 +1065,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1116,10 +1113,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1165,10 +1161,9 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1214,10 +1209,9 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1263,10 +1257,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1312,10 +1305,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,10 +1353,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1410,10 +1401,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,10 +1449,9 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1508,10 +1497,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1557,10 +1545,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1606,10 +1593,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1655,10 +1641,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1715,7 +1700,6 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1775,7 +1759,6 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1835,7 +1818,6 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1871,7 +1853,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1906,7 +1888,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1941,7 +1923,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1976,7 +1958,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2011,7 +1993,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2046,7 +2028,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2081,7 +2063,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2116,7 +2098,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2151,7 +2133,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2186,7 +2168,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2221,7 +2203,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2256,7 +2238,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2291,7 +2273,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2326,7 +2308,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2361,7 +2343,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2396,7 +2378,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2431,7 +2413,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2466,7 +2448,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2762,7 +2744,6 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
@@ -2787,7 +2768,6 @@ define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
@@ -2813,7 +2793,6 @@ define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
index 4fdd4c5e0ea87..cee80c2085e99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
@@ -969,10 +969,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1018,10 +1017,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1067,10 +1065,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1116,10 +1113,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1165,10 +1161,9 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1214,10 +1209,9 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1263,10 +1257,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1312,10 +1305,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,10 +1353,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1410,10 +1401,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,10 +1449,9 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1508,10 +1497,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1557,10 +1545,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1606,10 +1593,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1655,10 +1641,9 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1704,10 +1689,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1753,10 +1737,9 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1802,10 +1785,9 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1838,7 +1820,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1873,7 +1855,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1908,7 +1890,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1943,7 +1925,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1978,7 +1960,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2013,7 +1995,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2048,7 +2030,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2083,7 +2065,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2118,7 +2100,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2153,7 +2135,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2188,7 +2170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2223,7 +2205,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2258,7 +2240,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2293,7 +2275,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2328,7 +2310,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2363,7 +2345,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2398,7 +2380,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2433,7 +2415,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
index d011c496b824d..bbad18a07105a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
@@ -969,10 +969,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1018,10 +1017,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1067,10 +1065,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1116,10 +1113,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1165,10 +1161,9 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1214,10 +1209,9 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1263,10 +1257,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1312,10 +1305,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,10 +1353,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1410,10 +1401,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,10 +1449,9 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1508,10 +1497,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1557,10 +1545,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1606,10 +1593,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1655,10 +1641,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1715,7 +1700,6 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1775,7 +1759,6 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1835,7 +1818,6 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1871,7 +1853,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1906,7 +1888,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1941,7 +1923,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1976,7 +1958,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2011,7 +1993,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2046,7 +2028,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2081,7 +2063,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2116,7 +2098,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2151,7 +2133,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2186,7 +2168,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2221,7 +2203,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2256,7 +2238,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2291,7 +2273,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2326,7 +2308,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2361,7 +2343,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2396,7 +2378,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2431,7 +2413,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2466,7 +2448,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2762,7 +2744,6 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
@@ -2787,7 +2768,6 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
@@ -2813,7 +2793,6 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
index efb575367714b..6bfeaf35478d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
@@ -969,10 +969,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1018,10 +1017,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1067,10 +1065,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1116,10 +1113,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1165,10 +1161,9 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1214,10 +1209,9 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1263,10 +1257,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1312,10 +1305,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,10 +1353,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1410,10 +1401,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,10 +1449,9 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1508,10 +1497,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1557,10 +1545,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1606,10 +1593,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1655,10 +1641,9 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1704,10 +1689,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1753,10 +1737,9 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1802,10 +1785,9 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1838,7 +1820,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1873,7 +1855,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1908,7 +1890,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1943,7 +1925,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1978,7 +1960,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2013,7 +1995,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2048,7 +2030,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2083,7 +2065,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2118,7 +2100,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2153,7 +2135,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2188,7 +2170,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2223,7 +2205,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2258,7 +2240,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2293,7 +2275,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2328,7 +2310,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2363,7 +2345,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2398,7 +2380,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2433,7 +2415,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
index a9fa4fa98c3d5..8995ba04a75a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
index 64c27d519ebb4..ccaf5b0cd1993 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
index af1e6ed4895f3..df93885ecdb96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
index a1e074b1497bb..c44e404d9d994 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v9, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v10, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v12, v8
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
index 38416b7235be1..35da0e02e0360 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
index fcdc5a1987f2b..60ea2304fbf78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsle.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
index 0ae6e4507c365..ab39840291df2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
index 34c2a13ed4163..ab5603191bf80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsleu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 0e62148ba422a..79dc094f0683d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
index c76a76189a182..8ce3a253fccd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmslt.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index f92cf4d423ad8..ba3b8344088c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
index 7cbbcfe3064c5..9efd7fe946f66 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsltu.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsltu.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
index 3623502887af6..ee2ec04a0f793 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1685,7 +1685,6 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1745,7 +1744,6 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1805,7 +1803,6 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1841,7 +1838,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1876,7 +1873,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1911,7 +1908,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1946,7 +1943,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1981,7 +1978,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2016,7 +2013,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2051,7 +2048,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2086,7 +2083,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2121,7 +2118,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2156,7 +2153,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2191,7 +2188,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2226,7 +2223,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2261,7 +2258,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2296,7 +2293,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2331,7 +2328,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2366,7 +2363,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2401,7 +2398,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2436,7 +2433,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
index 56a7448a9b3a2..99f2549ac646d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
@@ -31,12 +31,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
@@ -83,12 +83,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
@@ -135,12 +135,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
@@ -187,12 +187,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
@@ -239,12 +239,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
@@ -291,12 +291,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
@@ -343,12 +343,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
@@ -395,12 +395,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
@@ -447,12 +447,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
@@ -499,12 +499,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
@@ -551,12 +551,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
@@ -603,12 +603,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
@@ -655,12 +655,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
@@ -707,12 +707,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
@@ -759,12 +759,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
@@ -811,12 +811,12 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
@@ -863,12 +863,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
@@ -915,12 +915,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
-; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmsne.vv v25, v8, v12
+; CHECK-NEXT:    vmv1r.v v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsne.vv v26, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
@@ -968,7 +968,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1015,7 +1015,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1062,7 +1062,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1109,7 +1109,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1156,7 +1156,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1203,7 +1203,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1250,7 +1250,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1297,7 +1297,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1344,7 +1344,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1438,7 +1438,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1485,7 +1485,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1532,7 +1532,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1579,7 +1579,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1626,7 +1626,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1673,7 +1673,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1720,7 +1720,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1767,7 +1767,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1802,7 +1802,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1837,7 +1837,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1872,7 +1872,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1907,7 +1907,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1942,7 +1942,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -1977,7 +1977,7 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2012,7 +2012,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2047,7 +2047,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2082,7 +2082,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2117,7 +2117,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2152,7 +2152,7 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2187,7 +2187,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2222,7 +2222,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2257,7 +2257,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2292,7 +2292,7 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2327,7 +2327,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2362,7 +2362,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
@@ -2397,7 +2397,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v25, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
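; Hand-written sketch, not part of the commit: a direct call to one of the masked
; compare intrinsics exercised by the tests above, assuming the
; (maskedoff, op1, op2, mask, vl) operand order implied by the test bodies.
; The mask result produced by such a call is what the _MASK pseudo now emits
; with a tail agnostic vsetvli instead of the previous tail undisturbed one.
declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64)

define <vscale x 4 x i1> @sketch_masked_vmsne(<vscale x 4 x i1> %maskedoff,
                                              <vscale x 4 x i16> %a,
                                              <vscale x 4 x i16> %b,
                                              <vscale x 4 x i1> %mask,
                                              i64 %vl) nounwind {
entry:
  ; Lanes where %mask is 0 keep the corresponding bit of %maskedoff; only the
  ; tail (elements past vl) becomes agnostic with this change.
  %r = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
    <vscale x 4 x i1> %maskedoff,
    <vscale x 4 x i16> %a,
    <vscale x 4 x i16> %b,
    <vscale x 4 x i1> %mask,
    i64 %vl)
  ret <vscale x 4 x i1> %r
}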

More information about the llvm-commits mailing list