[llvm] c57bce9 - [RISCV] Remove ForceTailAgnostic flag from vmv.s.x, vfmv.s.f and reductions.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 8 09:22:50 PDT 2021


Author: Craig Topper
Date: 2021-06-08T09:22:40-07:00
New Revision: c57bce9cc5facbdde52c24fe64fa4f6bf23a8449

URL: https://github.com/llvm/llvm-project/commit/c57bce9cc5facbdde52c24fe64fa4f6bf23a8449
DIFF: https://github.com/llvm/llvm-project/commit/c57bce9cc5facbdde52c24fe64fa4f6bf23a8449.diff

LOG: [RISCV] Remove ForceTailAgnostic flag from vmv.s.x, vfmv.s.f and reductions.

In 0.9 these instructions were defined to leave elements other than
element 0 of the destination unmodified. They were changed to use the
tail policy in 0.10. I missed that update.

I assume no one has noticed because in-order cores treat tail
agnostic the same as tail undisturbed. I believe Spike and QEMU do
the same.

Reviewed By: arcbbb, frasercrmck

Differential Revision: https://reviews.llvm.org/D103736
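
For illustration only (not part of the original commit message): the tail
policy matters here because inserting into element 0 must leave the remaining
elements of the destination intact, so with ForceTailAgnostic removed the
vmv.s.x lowering now requests a tail-undisturbed (tu) vsetvli. A minimal
LLVM IR sketch of that case; the function name is made up, and the expected
output mirrors insertelt_nxv2i32_0 in insertelt-int-rv32.ll below.

; Hypothetical test; compile with e.g. llc -mtriple=riscv32 -mattr=+experimental-v.
define <vscale x 2 x i32> @insert_elt0(<vscale x 2 x i32> %v, i32 %elt) {
  ; Writing element 0 must not clobber elements 1..vlmax-1 of %v.
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
  ret <vscale x 2 x i32> %r
}
; Expected codegen after this change (tu rather than ta before vmv.s.x):
;   vsetvli a1, zero, e32,m1,tu,mu
;   vmv.s.x v8, a0
;   ret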

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 74bd0d4bbea8..9a1c32262a4b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1978,7 +1978,6 @@ multiclass VPseudoBinaryM_VX_VI {
 
 multiclass VPseudoReductionV_VS {
   foreach m = MxList.m in {
-    let ForceTailAgnostic = true in
     defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
   }
 }
@@ -3809,7 +3808,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
                                              (ins m.vrclass:$rs2, ixlenimm:$sew),
                                              []>, RISCVVPseudo;
       let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
-          ForceTailAgnostic = true, Constraints = "$rd = $rs1" in
+          Constraints = "$rd = $rs1" in
       def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                              (ins m.vrclass:$rs1, GPR:$rs2,
                                                   AVL:$vl, ixlenimm:$sew),
@@ -3835,7 +3834,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
                                                       ixlenimm:$sew),
                                                  []>, RISCVVPseudo;
         let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
-            ForceTailAgnostic = true, Constraints = "$rd = $rs1" in
+            Constraints = "$rd = $rs1" in
         def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
                                           Pseudo<(outs m.vrclass:$rd),
                                                  (ins m.vrclass:$rs1, f.fprclass:$rs2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
index 13ca95f73f6c..1ef7323e85d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -679,7 +679,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    or a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    or a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    or a1, a1, a2
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -809,7 +811,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -2272,6 +2276,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v25, 1
@@ -2321,7 +2326,9 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index 49e59f062458..ccf3c519846d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -3890,7 +3890,9 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -4123,7 +4125,9 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -11898,6 +11902,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v25, 1
@@ -11955,7 +11960,9 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index 991b407b6234..29040369fb53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -2719,7 +2719,9 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -2901,7 +2903,9 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -8250,6 +8254,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a2
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 1
@@ -8287,7 +8292,9 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a1, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a6)
 ; LMULMAX1-RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
index 6c485b088a68..d671e5127941 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -10,7 +10,9 @@ define <1 x i1> @insertelt_v1i1(<1 x i1> %x, i1 %elt) nounwind {
 ; CHECK-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v25, v25, 1
 ; CHECK-NEXT:    vmsne.vi v0, v25, 0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index e69afe2c3538..b8a16c85320f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -53,6 +53,7 @@ define void @insertelt_v3i64(<3 x i64>* %x, i64 %y) {
 ; RV32-NEXT:    addi a4, a0, 20
 ; RV32-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
 ; RV32-NEXT:    vlse32.v v26, (a4), zero
+; RV32-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
 ; RV32-NEXT:    vmv.s.x v26, a3
 ; RV32-NEXT:    vsetvli zero, zero, e64,m2,tu,mu
 ; RV32-NEXT:    vslideup.vi v28, v26, 2
@@ -165,7 +166,9 @@ define void @insertelt_v8i64_0(<8 x i64>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v28, a1
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -213,7 +216,9 @@ define void @insertelt_c6_v8i64_0(<8 x i64>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v28, a1
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -262,9 +267,11 @@ define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    addi a2, zero, 6
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vmv.s.x v28, a2
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
-; CHECK-NEXT:    addi a1, zero, 6
-; CHECK-NEXT:    vmv.s.x v28, a1
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index fe70e2a6f1d5..fb2e56c0faf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -116,7 +116,9 @@ define void @buildvec_dominant2_v2i8(<2 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v25, zero
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   store <2 x i8> <i8 0, i8 -1>, <2 x i8>* %x
@@ -145,7 +147,9 @@ define void @buildvec_dominant0_v2i32(<2 x i64>* %x) {
 ; RV64-NEXT:    addi a1, a1, -455
 ; RV64-NEXT:    slli a1, a1, 13
 ; RV64-NEXT:    addi a1, a1, -910
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v25, a1
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vse64.v v25, (a0)
 ; RV64-NEXT:    ret
   store <2 x i64> <i64 2049638230412172402, i64 -1>, <2 x i64>* %x

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 7d5f19b539db..428089995205 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -954,6 +954,7 @@ define void @mulhu_v8i16(<8 x i16>* %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v28, 0
 ; CHECK-NEXT:    lui a1, 1048568
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v29, v28
 ; CHECK-NEXT:    vmv.s.x v29, a1
 ; CHECK-NEXT:    vsetivli zero, 7, e16,m1,tu,mu
@@ -1035,6 +1036,7 @@ define void @mulhu_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    vle64.v v25, (a0)
 ; RV64-NEXT:    vmv.v.i v26, 2
 ; RV64-NEXT:    addi a1, zero, 1
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v26, a1
 ; RV64-NEXT:    lui a1, 1035469
 ; RV64-NEXT:    addiw a1, a1, -819
@@ -1044,6 +1046,7 @@ define void @mulhu_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    addi a1, a1, -819
 ; RV64-NEXT:    slli a1, a1, 12
 ; RV64-NEXT:    addi a1, a1, -819
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmv.v.x v27, a1
 ; RV64-NEXT:    lui a1, 1026731
 ; RV64-NEXT:    addiw a1, a1, -1365
@@ -1053,7 +1056,9 @@ define void @mulhu_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    addi a1, a1, -1365
 ; RV64-NEXT:    slli a1, a1, 12
 ; RV64-NEXT:    addi a1, a1, -1365
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v27, a1
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmulhu.vv v25, v25, v27
 ; RV64-NEXT:    vsrl.vv v25, v25, v26
 ; RV64-NEXT:    vse64.v v25, (a0)
@@ -1217,6 +1222,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
 ; RV32-NEXT:    vmv.v.x v26, a2
 ; RV32-NEXT:    addi a1, a1, 1366
+; RV32-NEXT:    vsetvli zero, zero, e32,m1,tu,mu
 ; RV32-NEXT:    vmv.s.x v26, a1
 ; RV32-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; RV32-NEXT:    vmulh.vv v26, v25, v26
@@ -1248,7 +1254,9 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; RV64-NEXT:    vle64.v v25, (a0)
 ; RV64-NEXT:    vmv.v.i v26, -1
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v26, zero
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmul.vv v26, v25, v26
 ; RV64-NEXT:    lui a1, 21845
 ; RV64-NEXT:    addiw a1, a1, 1365
@@ -1260,7 +1268,9 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    addi a2, a1, 1365
 ; RV64-NEXT:    vmv.v.x v27, a2
 ; RV64-NEXT:    addi a1, a1, 1366
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v27, a1
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmulh.vv v25, v25, v27
 ; RV64-NEXT:    vadd.vv v25, v25, v26
 ; RV64-NEXT:    addi a1, zero, 63
@@ -4376,6 +4386,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    vmv.v.i v27, 0
 ; LMULMAX1-RV64-NEXT:    addi a2, zero, -1
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 63
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a2
 ; LMULMAX1-RV64-NEXT:    lui a2, 1044935
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, 455
@@ -4385,6 +4396,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 455
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 13
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 911
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v28, a2
 ; LMULMAX1-RV64-NEXT:    lui a2, 4681
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, 585
@@ -4394,17 +4406,22 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 585
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 13
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 1171
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v28, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmulhu.vv v28, v26, v28
 ; LMULMAX1-RV64-NEXT:    vsub.vv v26, v26, v28
 ; LMULMAX1-RV64-NEXT:    vmulhu.vv v26, v26, v27
 ; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX1-RV64-NEXT:    vmv.v.i v27, 3
 ; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vsrl.vv v26, v26, v27
 ; LMULMAX1-RV64-NEXT:    vmv.v.i v27, 2
 ; LMULMAX1-RV64-NEXT:    addi a2, zero, 1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a2
 ; LMULMAX1-RV64-NEXT:    lui a2, 1035469
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, -819
@@ -4414,6 +4431,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, -819
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 12
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, -819
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v28, a2
 ; LMULMAX1-RV64-NEXT:    lui a2, 1026731
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, -1365
@@ -4423,7 +4441,9 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, -1365
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 12
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, -1365
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v28, a2
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmulhu.vv v25, v25, v28
 ; LMULMAX1-RV64-NEXT:    vsrl.vv v25, v25, v27
 ; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
@@ -4776,12 +4796,14 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-LABEL: mulhs_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
-; LMULMAX1-RV64-NEXT:    vle64.v v25, (a1)
-; LMULMAX1-RV64-NEXT:    vle64.v v26, (a0)
+; LMULMAX1-RV64-NEXT:    vle64.v v26, (a1)
 ; LMULMAX1-RV64-NEXT:    vmv.v.i v27, -1
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, zero
-; LMULMAX1-RV64-NEXT:    vmul.vv v28, v25, v27
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vmul.vv v28, v26, v27
 ; LMULMAX1-RV64-NEXT:    lui a2, 21845
 ; LMULMAX1-RV64-NEXT:    addiw a2, a2, 1365
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 12
@@ -4792,22 +4814,24 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a3, a2, 1365
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v29, a3
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 1366
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v29, a2
-; LMULMAX1-RV64-NEXT:    vmulh.vv v25, v25, v29
-; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v28
+; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vmulh.vv v26, v26, v29
+; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX1-RV64-NEXT:    addi a2, zero, 63
-; LMULMAX1-RV64-NEXT:    vsrl.vx v28, v25, a2
+; LMULMAX1-RV64-NEXT:    vsrl.vx v28, v26, a2
 ; LMULMAX1-RV64-NEXT:    vid.v v30
-; LMULMAX1-RV64-NEXT:    vsra.vv v25, v25, v30
-; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v28
-; LMULMAX1-RV64-NEXT:    vmul.vv v27, v26, v27
-; LMULMAX1-RV64-NEXT:    vmulh.vv v26, v26, v29
-; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v27
-; LMULMAX1-RV64-NEXT:    vsrl.vx v27, v26, a2
 ; LMULMAX1-RV64-NEXT:    vsra.vv v26, v26, v30
-; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v27
-; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
-; LMULMAX1-RV64-NEXT:    vse64.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v28
+; LMULMAX1-RV64-NEXT:    vmul.vv v27, v25, v27
+; LMULMAX1-RV64-NEXT:    vmulh.vv v25, v25, v29
+; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v27
+; LMULMAX1-RV64-NEXT:    vsrl.vx v27, v25, a2
+; LMULMAX1-RV64-NEXT:    vsra.vv v25, v25, v30
+; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v27
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vse64.v v26, (a1)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %x
   %b = sdiv <4 x i64> %a, <i64 3, i64 -3, i64 3, i64 -3>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index a21f3f8632bd..6c2520a7cd00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -37,7 +37,9 @@ define <2 x i1> @buildvec_mask_nonconst_v2i1(i1 %x, i1 %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v25, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v25, v25, 1
 ; CHECK-NEXT:    vmsne.vi v0, v25, 0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
index e3a884c6afcc..593239dc8fc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
@@ -23,6 +23,7 @@ define <2 x half> @select_v2f16(i1 zeroext %c, <2 x half> %a, <2 x half> %b) {
 ; CHECK-NEXT:    vfmv.f.s ft1, v25
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, ft1
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, ft0
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <2 x half> %a, <2 x half> %b
@@ -52,6 +53,7 @@ define <2 x half> @selectcc_v2f16(half %a, half %b, <2 x half> %c, <2 x half> %d
 ; CHECK-NEXT:  .LBB1_5:
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:  .LBB1_6:
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v25, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    ret
@@ -1046,6 +1048,7 @@ define <2 x float> @select_v2f32(i1 zeroext %c, <2 x float> %a, <2 x float> %b)
 ; CHECK-NEXT:    vfmv.f.s ft1, v25
 ; CHECK-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, ft1
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, ft0
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <2 x float> %a, <2 x float> %b
@@ -1075,6 +1078,7 @@ define <2 x float> @selectcc_v2f32(float %a, float %b, <2 x float> %c, <2 x floa
 ; CHECK-NEXT:  .LBB9_5:
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:  .LBB9_6:
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v25, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    ret
@@ -2280,6 +2284,7 @@ define <2 x double> @select_v2f64(i1 zeroext %c, <2 x double> %a, <2 x double> %
 ; CHECK-NEXT:    vfmv.f.s ft1, v25
 ; CHECK-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, ft1
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, ft0
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <2 x double> %a, <2 x double> %b
@@ -2309,6 +2314,7 @@ define <2 x double> @selectcc_v2f64(double %a, double %b, <2 x double> %c, <2 x
 ; CHECK-NEXT:  .LBB17_5:
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:  .LBB17_6:
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v25, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
index 3b59ad792d2a..0a1b20e56e7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
@@ -202,7 +202,9 @@ define <4 x i64> @stepvector_v4i64() {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 3
 ; LMULMAX1-NEXT:    addi a0, zero, 2
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v9, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vid.v v8
 ; LMULMAX1-NEXT:    ret
 ;
@@ -223,13 +225,19 @@ define <8 x i64> @stepvector_v8i64() {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 3
 ; LMULMAX1-NEXT:    addi a0, zero, 2
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v9, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v10, 5
 ; LMULMAX1-NEXT:    addi a0, zero, 4
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v10, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v11, 7
 ; LMULMAX1-NEXT:    addi a0, zero, 6
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v11, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vid.v v8
 ; LMULMAX1-NEXT:    ret
 ;
@@ -253,25 +261,39 @@ define <16 x i64> @stepvector_v16i64() {
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v9, 3
 ; LMULMAX1-NEXT:    addi a0, zero, 2
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v9, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v10, 5
 ; LMULMAX1-NEXT:    addi a0, zero, 4
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v10, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v11, 7
 ; LMULMAX1-NEXT:    addi a0, zero, 6
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v11, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v12, 9
 ; LMULMAX1-NEXT:    addi a0, zero, 8
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v12, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v13, 11
 ; LMULMAX1-NEXT:    addi a0, zero, 10
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v13, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v14, 13
 ; LMULMAX1-NEXT:    addi a0, zero, 12
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v14, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v15, 15
 ; LMULMAX1-NEXT:    addi a0, zero, 14
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
 ; LMULMAX1-NEXT:    vmv.s.x v15, a0
+; LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vid.v v8
 ; LMULMAX1-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 4db8ceee17d0..2ee2f6863a62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -356,7 +356,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV32-NEXT:    lbu a1, 0(a1)
 ; RV32-NEXT:    slli a2, a2, 8
 ; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV32-NEXT:    vsetivli zero, 2, e16,mf4,tu,mu
 ; RV32-NEXT:    vmv.s.x v9, a1
 ; RV32-NEXT:  .LBB4_2: # %else
 ; RV32-NEXT:    andi a0, a0, 2
@@ -403,7 +403,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
 ; RV64-NEXT:    lbu a1, 0(a1)
 ; RV64-NEXT:    slli a2, a2, 8
 ; RV64-NEXT:    or a1, a2, a1
-; RV64-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV64-NEXT:    vsetivli zero, 2, e16,mf4,tu,mu
 ; RV64-NEXT:    vmv.s.x v9, a1
 ; RV64-NEXT:  .LBB4_2: # %else
 ; RV64-NEXT:    andi a0, a0, 2
@@ -503,7 +503,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV64-NEXT:    lwu a1, 0(a1)
 ; RV64-NEXT:    slli a2, a2, 32
 ; RV64-NEXT:    or a1, a2, a1
-; RV64-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-NEXT:    vsetivli zero, 2, e64,m1,tu,mu
 ; RV64-NEXT:    vmv.s.x v9, a1
 ; RV64-NEXT:  .LBB5_2: # %else
 ; RV64-NEXT:    andi a0, a0, 2

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
index 9a0887ca75c2..98529d4965af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
@@ -5,7 +5,7 @@
 define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv1f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
@@ -40,7 +40,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
 define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv2f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
@@ -75,7 +75,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
 define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv4f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
 define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
@@ -145,7 +145,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
 define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv16f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
@@ -180,7 +180,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
 define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv32f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
@@ -215,7 +215,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
 define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv1f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
 define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv2f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
@@ -285,7 +285,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
 define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv4f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
@@ -320,7 +320,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
 define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv8f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
 define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv16f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
@@ -390,7 +390,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
 define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv1f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
@@ -425,7 +425,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
 define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv2f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
@@ -460,7 +460,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
 define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv4f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
@@ -495,7 +495,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
 define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv8f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
index dff6cf7e9829..eac995b88879 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
@@ -5,7 +5,7 @@
 define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv1f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
@@ -40,7 +40,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
 define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv2f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
@@ -75,7 +75,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
 define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv4f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
@@ -110,7 +110,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
 define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
@@ -145,7 +145,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
 define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv16f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
@@ -180,7 +180,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
 define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv32f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
@@ -215,7 +215,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
 define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv1f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
@@ -250,7 +250,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
 define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv2f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
@@ -285,7 +285,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
 define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv4f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
@@ -320,7 +320,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
 define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv8f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
 define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv16f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
@@ -390,7 +390,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
 define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv1f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
@@ -425,7 +425,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
 define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv2f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
@@ -460,7 +460,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
 define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv4f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
@@ -495,7 +495,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
 define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv8f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
index cb0040d4b98a..9da0ba193e10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -5,7 +5,7 @@
 define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
@@ -40,7 +40,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext
 define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
@@ -75,7 +75,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext
 define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
@@ -110,7 +110,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext
 define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
@@ -145,7 +145,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
@@ -180,7 +180,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
@@ -215,7 +215,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
@@ -250,7 +250,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signe
 define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
@@ -285,7 +285,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 sign
 define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
@@ -320,7 +320,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 sign
 define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
@@ -390,7 +390,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
@@ -425,7 +425,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
@@ -460,7 +460,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 s
 define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
@@ -495,7 +495,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %elt
 define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
@@ -530,7 +530,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %elt
 define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
@@ -565,7 +565,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt
 define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
@@ -600,7 +600,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt
 define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
@@ -809,7 +809,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_c10:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, zero, 10
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 0
@@ -847,7 +847,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, zero, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
index 1bf8cd7e95b4..33bf9edb91c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -5,7 +5,7 @@
 define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
@@ -40,7 +40,7 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext
 define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
@@ -75,7 +75,7 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext
 define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
@@ -110,7 +110,7 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext
 define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
@@ -145,7 +145,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
@@ -180,7 +180,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
@@ -215,7 +215,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
@@ -250,7 +250,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signe
 define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
@@ -285,7 +285,7 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 sign
 define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
@@ -320,7 +320,7 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 sign
 define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
@@ -390,7 +390,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
@@ -425,7 +425,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
@@ -460,7 +460,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 s
 define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
@@ -495,7 +495,7 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 sign
 define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
@@ -530,7 +530,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 sign
 define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
@@ -565,7 +565,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 sign
 define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
@@ -600,7 +600,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 sign
 define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
@@ -635,7 +635,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 s
 define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
@@ -671,7 +671,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
@@ -707,7 +707,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
@@ -743,7 +743,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
index 98ff9ce17988..e8e457e555e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, ha
 define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, ha
 define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, ha
 define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, ha
 define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>,
 define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>,
 define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>,
 define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +97,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>,
 define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -110,7 +110,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>,
 define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>,
 define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float
 define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>
 define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>
 define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -175,7 +175,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>
 define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>
 define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
index dbd3c5b5f8e6..89b5567050df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, ha
 define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, ha
 define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, ha
 define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, ha
 define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>,
 define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>,
 define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>,
 define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +97,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>,
 define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -110,7 +110,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>,
 define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>,
 define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float
 define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>
 define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>
 define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -175,7 +175,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>
 define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>
 define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
index eac9d45d1e77..d3617de31952 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
index fbda7a8fb502..a60627898bc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
index 6455d558422b..d8ac37a06589 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
index 531d178b77a6..24a7862e7f51 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
index 0ca4d8c2796b..6b285149923f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
index 80e5b505d80b..6cee3b4baebf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
index d6b1bfb9309e..a867237f7526 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
index 87b09db30325..94295b139477 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
index edc3fa142970..60562bd45793 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
index 8e1fbeedcab3..510164c7a9c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
index 667532a4ae17..8b3c04521dd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
index ee820c422c9d..a3140c680a6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
index 3c48d763537d..b788403820b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i32)
 define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i32)
 define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i32)
 define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i32)
 define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
 define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
 define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
 define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +97,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16,
 define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -110,7 +110,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16,
 define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16,
 define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
 define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
 define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
 define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -175,7 +175,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32,
 define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32,
 define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
 define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
 define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
 define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
index e1334bec90c8..c2a0f89d563b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i64)
 define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i64)
 define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i64)
 define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i64)
 define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
 define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
 define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
 define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +97,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16,
 define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -110,7 +110,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16,
 define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16,
 define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
 define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
 define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
 define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -175,7 +175,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32,
 define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32,
 define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
 define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
 define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
 define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64,
 define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -253,7 +253,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64,
 define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -266,7 +266,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64,
 define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -279,7 +279,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64,
 define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
index 7a38f52c3d18..4e637bbc2638 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
index ad8fb0bcc69e..497e3d813d37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 6c61629c6ce7..dec5014fcd52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index 99fca3c45739..0585e2d4e67f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index 3cd922a1d121..e652be484e4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 57c6efa34609..69a862424e34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 85d8825c7c09..7417b2d666b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index cce09b041c4d..c8244eb53078 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
index bbca073b2a34..14a1d5b4cce8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
index 8e5524723a57..2cd9b26bdea3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
index a9ad2d25d9a8..22882a3a50da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
index 9ddd87ca2e47..b96a9055dbe2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
index 3f3f25b17053..1d38b3e7db71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
index b9ba10a88e41..e20dd91c1b54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 48813d72bb15..a276551ac26d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index dc10fcce7568..40622d5cfe35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
index e05ff0c4a1d6..070d3028d4e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
index 44fcd8643c33..973333ec78cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
index 443287e1fced..d6e32e0d906a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
index 42164f46c5cc..191199af3b46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 5522ed95b193..68b9bd9cbe77 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -578,14 +578,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32MV-NEXT:    vsll.vi v26, v25, 1
 ; RV32MV-NEXT:    addi a1, zero, 9
 ; RV32MV-NEXT:    vmv.v.i v27, 10
+; RV32MV-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
 ; RV32MV-NEXT:    vmv.s.x v27, a1
+; RV32MV-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; RV32MV-NEXT:    vsll.vv v26, v26, v27
 ; RV32MV-NEXT:    addi a1, zero, 2047
 ; RV32MV-NEXT:    vand.vx v25, v25, a1
 ; RV32MV-NEXT:    vmv.v.i v27, 0
 ; RV32MV-NEXT:    addi a2, zero, 1
+; RV32MV-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
 ; RV32MV-NEXT:    vmv1r.v v28, v27
 ; RV32MV-NEXT:    vmv.s.x v28, a2
+; RV32MV-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; RV32MV-NEXT:    lui a2, %hi(.LCPI4_1)
 ; RV32MV-NEXT:    addi a2, a2, %lo(.LCPI4_1)
 ; RV32MV-NEXT:    vle16.v v29, (a2)
@@ -639,14 +643,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64MV-NEXT:    vsll.vi v26, v25, 1
 ; RV64MV-NEXT:    addi a1, zero, 9
 ; RV64MV-NEXT:    vmv.v.i v27, 10
+; RV64MV-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
 ; RV64MV-NEXT:    vmv.s.x v27, a1
+; RV64MV-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; RV64MV-NEXT:    vsll.vv v26, v26, v27
 ; RV64MV-NEXT:    addi a1, zero, 2047
 ; RV64MV-NEXT:    vand.vx v25, v25, a1
 ; RV64MV-NEXT:    vmv.v.i v27, 0
 ; RV64MV-NEXT:    addi a2, zero, 1
+; RV64MV-NEXT:    vsetvli zero, zero, e16,mf2,tu,mu
 ; RV64MV-NEXT:    vmv1r.v v28, v27
 ; RV64MV-NEXT:    vmv.s.x v28, a2
+; RV64MV-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; RV64MV-NEXT:    lui a2, %hi(.LCPI4_1)
 ; RV64MV-NEXT:    addi a2, a2, %lo(.LCPI4_1)
 ; RV64MV-NEXT:    vle16.v v29, (a2)
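
For anyone skimming the generated test updates above: every vsetvli change simply flips the tail-policy field from ta (tail agnostic) to tu (tail undisturbed) for the reduction pseudos, and the urem test additionally gains explicit vsetvli toggles around its vmv.s.x insertions. A minimal sketch of what the new policy requires, mirroring the first vwredsumu test above (illustration only, not part of the diff):

  vsetvli zero, a0, e8,mf8,tu,mu   # tu: destination elements other than element 0 must be preserved
  vwredsumu.vs v8, v9, v10         # v8[0] = v10[0] + sum of zero-extended v9[0..vl-1]; v8[1..] left untouched
  # Under a ta encoding, an implementation would have been free to overwrite
  # v8[1..] with arbitrary values after the reduction.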


        

