[llvm] 7da272a - [RISCV][RISCVISelLowering] Add tail agnostic policy operand to VECREDUCE instructions

Nitin John Raj via llvm-commits <llvm-commits at lists.llvm.org>
Sat Mar 25 02:47:07 PDT 2023


Author: Nitin John Raj
Date: 2023-03-25T02:42:13-07:00
New Revision: 7da272af89f8b4563082eba955ce513e4feff201

URL: https://github.com/llvm/llvm-project/commit/7da272af89f8b4563082eba955ce513e4feff201
DIFF: https://github.com/llvm/llvm-project/commit/7da272af89f8b4563082eba955ce513e4feff201.diff

LOG: [RISCV][RISCVISelLowering] Add tail agnostic policy operand to VECREDUCE instructions
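
Vector reductions write their result only to element 0 of the destination, and lowerReductionSeq reads back only that element, so the remaining destination elements do not need to be preserved. The patch adds a sixth operand to the SDTRVVVecReduce profile, introduces VPseudoTernaryWithTailPolicy pseudos that carry vl/sew/policy operands, and has lowerReductionSeq pass TAIL_AGNOSTIC, so reductions are now selected with a tail-agnostic vtype instead of tail-undisturbed. A minimal before/after sketch, taken from the updated fixed-vectors-reduction-fp-vp.ll checks below:

    ; before this patch
    vsetvli zero, a0, e16, mf4, tu, ma
    vfredusum.vs v9, v8, v9, v0.t
    vfmv.f.s fa0, v9

    ; with this patch
    vsetvli zero, a0, e16, mf4, ta, ma
    vfredusum.vs v9, v8, v9, v0.t
    vfmv.f.s fa0, v9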

Differential Revision: https://reviews.llvm.org/D146752

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
    llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
    llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
    llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7256416520e60..aad626f3f06a7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6334,8 +6334,9 @@ static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
                                DAG.getUNDEF(M1VT),
                                InitialValue, DAG.getConstant(0, DL, XLenVT));
   SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
-  SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, PassThru, Vec,
-                                  InitialValue, Mask, VL);
+  SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+  SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
+  SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops);
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
                      DAG.getConstant(0, DL, XLenVT));
 }
@@ -8751,10 +8752,11 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
         DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalarVT, DAG.getUNDEF(ScalarVT),
                     NewScalarV, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
 
+  SDValue Ops[] = {Reduce.getOperand(0), Reduce.getOperand(1),
+                   NewScalarV,           Reduce.getOperand(3),
+                   Reduce.getOperand(4), Reduce.getOperand(5)};
   SDValue NewReduce =
-      DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(),
-                  Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
-                  Reduce.getOperand(3), Reduce.getOperand(4));
+      DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(), Ops);
   return DAG.getNode(Extract.getOpcode(), DL, Extract.getValueType(), NewReduce,
                      Extract.getOperand(1));
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 3bf70e47aa2ca..b1da93821affe 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1237,6 +1237,25 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
   let UsesMaskPolicy = 1;
 }
 
+class VPseudoBinaryTailPolicy<VReg RetClass,
+                              RegisterClass Op1Class,
+                              DAGOperand Op2Class,
+                              string Constraint> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+                (ins GetVRegNoV0<RetClass>.R:$merge,
+                     Op1Class:$rs2, Op2Class:$rs1,
+                     VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+}
+
 // Like VPseudoBinaryMask, but output can be V0.
 class VPseudoBinaryMOutMask<VReg RetClass,
                             RegisterClass Op1Class,
@@ -3147,6 +3166,20 @@ multiclass VPseudoTernaryNoMaskNoPolicy<VReg RetClass,
   }
 }
 
+multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
+                                        RegisterClass Op1Class,
+                                        DAGOperand Op2Class,
+                                        LMULInfo MInfo,
+                                        string Constraint = "",
+                                        bit Commutable = 0> {
+  let VLMul = MInfo.value in {
+    let isCommutable = Commutable in
+    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryTailPolicy<RetClass, Op1Class, Op2Class, Constraint>;
+  }
+}
+
+
 multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
@@ -3402,7 +3435,7 @@ multiclass VPseudoVRED_VS {
   foreach m = MxList in {
     defvar mx = m.MX;
     defvar WriteVIRedV_From_MX = !cast<SchedWrite>("WriteVIRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
                Sched<[WriteVIRedV_From_MX, ReadVIRedV, ReadVIRedV, ReadVIRedV,
                       ReadVMask]>;
   }
@@ -3412,7 +3445,7 @@ multiclass VPseudoVWRED_VS {
   foreach m = MxList in {
     defvar mx = m.MX;
     defvar WriteVIWRedV_From_MX = !cast<SchedWrite>("WriteVIWRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
                Sched<[WriteVIWRedV_From_MX, ReadVIWRedV, ReadVIWRedV,
                       ReadVIWRedV, ReadVMask]>;
   }
@@ -3422,7 +3455,7 @@ multiclass VPseudoVFRED_VS {
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFRedV_From_MX = !cast<SchedWrite>("WriteVFRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
                Sched<[WriteVFRedV_From_MX, ReadVFRedV, ReadVFRedV, ReadVFRedV,
                       ReadVMask]>;
   }
@@ -3432,7 +3465,7 @@ multiclass VPseudoVFREDO_VS {
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFRedOV_From_MX = !cast<SchedWrite>("WriteVFRedOV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
                Sched<[WriteVFRedOV_From_MX, ReadVFRedOV, ReadVFRedOV,
                       ReadVFRedOV, ReadVMask]>;
   }
@@ -3442,7 +3475,7 @@ multiclass VPseudoVFWRED_VS {
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFWRedV_From_MX = !cast<SchedWrite>("WriteVFWRedV_From_" # mx);
-    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
+    defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
                Sched<[WriteVFWRedV_From_MX, ReadVFWRedV, ReadVFWRedV,
                       ReadVFWRedV, ReadVMask]>;
   }
@@ -4238,6 +4271,28 @@ class VPatTernaryNoMask<string intrinsic,
                     op2_kind:$rs2,
                     GPR:$vl, sew)>;
 
+class VPatTernaryNoMaskTA<string intrinsic,
+                          string inst,
+                          string kind,
+                          ValueType result_type,
+                          ValueType op1_type,
+                          ValueType op2_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg result_reg_class,
+                          RegisterClass op1_reg_class,
+                          DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                    result_reg_class:$rs3,
+                    (op1_type op1_reg_class:$rs1),
+                    op2_kind:$rs2,
+                    GPR:$vl, sew, TAIL_AGNOSTIC)>;
+
 class VPatTernaryNoMaskWithPolicy<string intrinsic,
                                   string inst,
                                   string kind,
@@ -4310,6 +4365,31 @@ class VPatTernaryMaskPolicy<string intrinsic,
                     (mask_type V0),
                     GPR:$vl, sew, (XLenVT timm:$policy))>;
 
+class VPatTernaryMaskTA<string intrinsic,
+                        string inst,
+                        string kind,
+                        ValueType result_type,
+                        ValueType op1_type,
+                        ValueType op2_type,
+                        ValueType mask_type,
+                        int sew,
+                        LMULInfo vlmul,
+                        VReg result_reg_class,
+                        RegisterClass op1_reg_class,
+                        DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
+                    result_reg_class:$rs3,
+                    (op1_type op1_reg_class:$rs1),
+                    op2_kind:$rs2,
+                    (mask_type V0),
+                    GPR:$vl, sew, TAIL_AGNOSTIC)>;
+
 multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst>
 {
@@ -5053,6 +5133,26 @@ multiclass VPatTernaryWithPolicy<string intrinsic,
                               op2_kind>;
 }
 
+multiclass VPatTernaryTA<string intrinsic,
+                         string inst,
+                         string kind,
+                         ValueType result_type,
+                         ValueType op1_type,
+                         ValueType op2_type,
+                         ValueType mask_type,
+                         int sew,
+                         LMULInfo vlmul,
+                         VReg result_reg_class,
+                         RegisterClass op1_reg_class,
+                         DAGOperand op2_kind> {
+  def : VPatTernaryNoMaskTA<intrinsic, inst, kind, result_type, op1_type,
+                            op2_type, sew, vlmul, result_reg_class,
+                            op1_reg_class, op2_kind>;
+  def : VPatTernaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
+                          op2_type, mask_type, sew, vlmul, result_reg_class,
+                          op1_reg_class, op2_kind>;
+}
+
 multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -5157,19 +5257,19 @@ multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat =
   foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
   {
     defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
-    defm : VPatTernary<intrinsic, instruction, "VS",
-                       vectorM1.Vector, vti.Vector,
-                       vectorM1.Vector, vti.Mask,
-                       vti.Log2SEW, vti.LMul,
-                       VR, vti.RegClass, VR>;
+    defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                         vectorM1.Vector, vti.Vector,
+                         vectorM1.Vector, vti.Mask,
+                         vti.Log2SEW, vti.LMul,
+                         VR, vti.RegClass, VR>;
   }
   foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
   {
-    defm : VPatTernary<intrinsic, instruction, "VS",
-                       gvti.VectorM1, gvti.Vector,
-                       gvti.VectorM1, gvti.Mask,
-                       gvti.Log2SEW, gvti.LMul,
-                       VR, gvti.RegClass, VR>;
+    defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                         gvti.VectorM1, gvti.Vector,
+                         gvti.VectorM1, gvti.Mask,
+                         gvti.Log2SEW, gvti.LMul,
+                         VR, gvti.RegClass, VR>;
   }
 }
 
@@ -5179,12 +5279,12 @@ multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat =
     defvar wtiSEW = !mul(vti.SEW, 2);
     if !le(wtiSEW, 64) then {
       defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
-      defm : VPatTernary<intrinsic, instruction, "VS",
-                         wtiM1.Vector, vti.Vector,
-                         wtiM1.Vector, vti.Mask,
-                         vti.Log2SEW, vti.LMul,
-                         wtiM1.RegClass, vti.RegClass,
-                         wtiM1.RegClass>;
+      defm : VPatTernaryTA<intrinsic, instruction, "VS",
+                           wtiM1.Vector, vti.Vector,
+                           wtiM1.Vector, vti.Mask,
+                           vti.Log2SEW, vti.LMul,
+                           wtiM1.RegClass, vti.RegClass,
+                           wtiM1.RegClass>;
     }
   }
 }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index d3e9aa3d17750..43ca9259ef2a8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -325,9 +325,10 @@ def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWBinOpW_VL>;
 def riscv_vwsub_w_vl :  SDNode<"RISCVISD::VWSUB_W_VL",  SDT_RISCVVWBinOpW_VL>;
 def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWBinOpW_VL>;
 
-def SDTRVVVecReduce : SDTypeProfile<1, 5, [
+def SDTRVVVecReduce : SDTypeProfile<1, 6, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
-  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>
+  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
+  SDTCisVT<6, XLenVT>
 ]>;
 
 def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
@@ -1081,25 +1082,29 @@ multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> {
 multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
   foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
     defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
-    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
-                                 (vti.Mask true_mask),
-                                 VLOpFrag)),
+    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+                                 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
+                                 (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
         (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
             (vti_m1.Vector VR:$merge),
             (vti.Vector vti.RegClass:$rs1),
             (vti_m1.Vector VR:$rs2),
-            GPR:$vl, vti.Log2SEW)>;
+            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
 
-    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
-                                 (vti.Mask V0), VLOpFrag)),
+    def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+                                 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
+                                 (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
         (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
             (vti_m1.Vector VR:$merge),
             (vti.Vector vti.RegClass:$rs1),
             (vti_m1.Vector VR:$rs2),
-            (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+            (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
   }
 }
 
+
 multiclass VPatBinaryExtVL_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
   foreach vtiToWti = AllWidenableIntVectors in {
     defvar vti = vtiToWti.Vti;
@@ -1149,16 +1154,20 @@ multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_n
     defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
-                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
              (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
+                (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+                (XLenVT timm:$policy))>;
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
-                                 VR:$rs2, (vti.Mask V0), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
              (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
                 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+                (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+                (XLenVT timm:$policy))>;
   }
 }
 
@@ -1169,16 +1178,20 @@ multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instru
     defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
-                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask true_mask), VLOpFrag,
+                                 (XLenVT timm:$policy))),
              (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
+                (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
+                (XLenVT timm:$policy))>;
     def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
                                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
-                                 VR:$rs2, (vti.Mask V0), VLOpFrag)),
+                                 VR:$rs2, (vti.Mask V0), VLOpFrag,
+                                 (XLenVT timm:$policy))),
              (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
                 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
-                (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+                (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+                (XLenVT timm:$policy))>;
   }
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index 3872f20d8a4a1..c6cb4bbea4924 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -11,7 +11,7 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -24,7 +24,7 @@ define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 ze
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -39,7 +39,7 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroex
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -52,7 +52,7 @@ define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 ze
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zer
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -80,7 +80,7 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -95,7 +95,7 @@ define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zer
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -108,7 +108,7 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -131,13 +131,13 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v25, fa0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    addi a1, a0, -32
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredusum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -159,13 +159,13 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m,
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v25, fa0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    addi a1, a0, -32
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredosum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -181,7 +181,7 @@ define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -194,7 +194,7 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@ define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -222,7 +222,7 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -237,7 +237,7 @@ define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -250,7 +250,7 @@ define double @vpreduce_ord_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 1e384cb6037e9..fd9e0098eb76a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -11,7 +11,7 @@ define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -27,7 +27,7 @@ define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -42,7 +42,7 @@ define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -58,7 +58,7 @@ define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -73,7 +73,7 @@ define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -88,7 +88,7 @@ define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -103,7 +103,7 @@ define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -118,7 +118,7 @@ define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -134,7 +134,7 @@ define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -149,7 +149,7 @@ define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -165,7 +165,7 @@ define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -180,7 +180,7 @@ define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -196,7 +196,7 @@ define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -211,7 +211,7 @@ define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -226,7 +226,7 @@ define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -241,7 +241,7 @@ define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -256,7 +256,7 @@ define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -271,7 +271,7 @@ define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -288,7 +288,7 @@ define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -299,7 +299,7 @@ define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -314,7 +314,7 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -331,7 +331,7 @@ define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -342,7 +342,7 @@ define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -357,7 +357,7 @@ define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -372,7 +372,7 @@ define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -387,7 +387,7 @@ define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -402,7 +402,7 @@ define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -417,7 +417,7 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -434,7 +434,7 @@ define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -445,7 +445,7 @@ define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -460,7 +460,7 @@ define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -477,7 +477,7 @@ define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -488,7 +488,7 @@ define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -503,7 +503,7 @@ define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -518,7 +518,7 @@ define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -533,7 +533,7 @@ define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -548,7 +548,7 @@ define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -563,7 +563,7 @@ define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -578,7 +578,7 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -589,7 +589,7 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -604,7 +604,7 @@ define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -619,7 +619,7 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -630,7 +630,7 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -645,7 +645,7 @@ define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -660,7 +660,7 @@ define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@ define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -690,7 +690,7 @@ define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -705,7 +705,7 @@ define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -720,7 +720,7 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -731,7 +731,7 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -746,7 +746,7 @@ define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -761,7 +761,7 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -772,7 +772,7 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -787,7 +787,7 @@ define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -802,7 +802,7 @@ define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -817,7 +817,7 @@ define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -832,7 +832,7 @@ define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -855,7 +855,7 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
 ; CHECK-NEXT:  .LBB49_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v25, a0
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
@@ -864,7 +864,7 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
 ; CHECK-NEXT:    sltu a1, a1, a0
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    and a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vredxor.vs v8, v16, v8, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -885,7 +885,7 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -899,7 +899,7 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -919,7 +919,7 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -933,7 +933,7 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -953,7 +953,7 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -967,7 +967,7 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -987,7 +987,7 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1001,7 +1001,7 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1021,7 +1021,7 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1035,7 +1035,7 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1055,7 +1055,7 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1069,7 +1069,7 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1089,7 +1089,7 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1103,7 +1103,7 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1123,7 +1123,7 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1137,7 +1137,7 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1157,7 +1157,7 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1171,7 +1171,7 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1191,7 +1191,7 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1205,7 +1205,7 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1225,7 +1225,7 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1239,7 +1239,7 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1259,7 +1259,7 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1273,7 +1273,7 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1293,7 +1293,7 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1307,7 +1307,7 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1327,7 +1327,7 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1341,7 +1341,7 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1361,7 +1361,7 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1375,7 +1375,7 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1395,7 +1395,7 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1409,7 +1409,7 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
index 72a6ae952cd2d..1ed4e5ca44f1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll
@@ -12,7 +12,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -518,7 +518,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -541,7 +541,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -564,7 +564,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -587,7 +587,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -610,7 +610,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -633,7 +633,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -656,7 +656,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -679,7 +679,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
index dcb34674c2980..415392e2d0034 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll
@@ -12,7 +12,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -518,7 +518,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -541,7 +541,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -564,7 +564,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -587,7 +587,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -610,7 +610,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -633,7 +633,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -656,7 +656,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -679,7 +679,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
index 221a8fcc52c66..6d49023f6d525 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll
@@ -12,7 +12,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -518,7 +518,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -541,7 +541,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -564,7 +564,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -587,7 +587,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -610,7 +610,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -633,7 +633,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -656,7 +656,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -679,7 +679,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
index 053316da471b5..f72aed34226ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll
@@ -12,7 +12,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
 define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
 define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
 define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
 define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
 define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
 define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
 define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
 define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
 define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
 define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
 define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1
 define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -518,7 +518,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
 define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -541,7 +541,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
 define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -564,7 +564,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
 define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -587,7 +587,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
 define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -610,7 +610,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
 define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -633,7 +633,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
 define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -656,7 +656,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
 define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -679,7 +679,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
 define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
index 6b967c5efcc39..c850c303592cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll
@@ -12,7 +12,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f3
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f
 define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
index 427f924fcfc4c..77df494ccd402 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll
@@ -12,7 +12,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -35,7 +35,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -81,7 +81,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -104,7 +104,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -150,7 +150,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -173,7 +173,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -196,7 +196,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -219,7 +219,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f3
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -242,7 +242,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -265,7 +265,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
 define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -288,7 +288,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
 define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -311,7 +311,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -334,7 +334,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
 define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -357,7 +357,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -380,7 +380,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
 define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -403,7 +403,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -426,7 +426,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
 define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -449,7 +449,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f6
 define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -472,7 +472,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
 define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -495,7 +495,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f
 define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 29ea7d9fde5b7..aa04369b86978 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -214,12 +214,12 @@ body:             |
     ; CHECK: liveins: $x10, $v8, $v26, $v27
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v26 = VMV1R_V killed $v8
     ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10
     $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
-    $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype
+    $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype
     $v26 = COPY killed renamable $v8
     $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype
     $v8m8 = VL8RE8_V killed $x10

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
index 2b1541b0bb5e3..b9a6c8d07a9b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
index 765e2dc5de0e5..404768d718dd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 30bd0d93ce1a2..745abee375d98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index b8bb70cc0e6ef..02b9f5f537ef4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index 81bcace763748..01be00d6790ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 1318d8f3d981b..22cc5ef74b4bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 833f2f675c957..594916ac7e329 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index 50ce771c198aa..e4d6180378f50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
index 7c71be9e4d540..e6b4d73f0178e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
index 9ab1adbef7967..506127ba221d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
index 1d2f411d1c540..52728b8b39c25 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
index 3eb69631df6fe..27ecdcfa6ccf1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
index c7c098daae518..4d408b9f59e8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
index a72bd9a4f1b6c..f77ecafdf773d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index 155a36f58e3b4..89a385233380b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -11,7 +11,7 @@ define half @vpreduce_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -24,7 +24,7 @@ define half @vpreduce_ord_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -39,7 +39,7 @@ define half @vpreduce_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -52,7 +52,7 @@ define half @vpreduce_ord_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define half @vpreduce_fadd_nxv4f16(half %s, <vscale x 4 x half> %v, <vscale x 4
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -80,7 +80,7 @@ define half @vpreduce_ord_fadd_nxv4f16(half %s, <vscale x 4 x half> %v, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -105,13 +105,13 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:  .LBB6_2:
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    sub a1, a0, a1
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredusum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -135,13 +135,13 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:  .LBB7_2:
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    sub a1, a0, a1
 ; CHECK-NEXT:    sltu a0, a0, a1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfredosum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -157,7 +157,7 @@ define float @vpreduce_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -170,7 +170,7 @@ define float @vpreduce_ord_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -185,7 +185,7 @@ define float @vpreduce_fadd_nxv2f32(float %s, <vscale x 2 x float> %v, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -198,7 +198,7 @@ define float @vpreduce_ord_fadd_nxv2f32(float %s, <vscale x 2 x float> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -213,7 +213,7 @@ define float @vpreduce_fadd_nxv4f32(float %s, <vscale x 4 x float> %v, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -226,7 +226,7 @@ define float @vpreduce_ord_fadd_nxv4f32(float %s, <vscale x 4 x float> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -241,7 +241,7 @@ define double @vpreduce_fadd_nxv1f64(double %s, <vscale x 1 x double> %v, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -254,7 +254,7 @@ define double @vpreduce_ord_fadd_nxv1f64(double %s, <vscale x 1 x double> %v, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
@@ -269,7 +269,7 @@ define double @vpreduce_fadd_nxv2f64(double %s, <vscale x 2 x double> %v, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -282,7 +282,7 @@ define double @vpreduce_ord_fadd_nxv2f64(double %s, <vscale x 2 x double> %v, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
@@ -297,7 +297,7 @@ define double @vpreduce_fadd_nxv3f64(double %s, <vscale x 3 x double> %v, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v12
 ; CHECK-NEXT:    ret
@@ -310,7 +310,7 @@ define double @vpreduce_ord_fadd_nxv3f64(double %s, <vscale x 3 x double> %v, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v12
 ; CHECK-NEXT:    ret
@@ -325,7 +325,7 @@ define double @vpreduce_fadd_nxv4f64(double %s, <vscale x 4 x double> %v, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredusum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v12
 ; CHECK-NEXT:    ret
@@ -338,7 +338,7 @@ define double @vpreduce_ord_fadd_nxv4f64(double %s, <vscale x 4 x double> %v, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v12
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index af9f1a3664d7c..c02dd70c33cb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -11,7 +11,7 @@ define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -27,7 +27,7 @@ define signext i8 @vpreduce_umax_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -42,7 +42,7 @@ define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -58,7 +58,7 @@ define signext i8 @vpreduce_umin_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -73,7 +73,7 @@ define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -88,7 +88,7 @@ define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -103,7 +103,7 @@ define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -118,7 +118,7 @@ define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -133,7 +133,7 @@ define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -149,7 +149,7 @@ define signext i8 @vpreduce_umax_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -164,7 +164,7 @@ define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -180,7 +180,7 @@ define signext i8 @vpreduce_umin_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -195,7 +195,7 @@ define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -210,7 +210,7 @@ define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -225,7 +225,7 @@ define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -240,7 +240,7 @@ define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -255,7 +255,7 @@ define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, <vscale x 3 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -270,7 +270,7 @@ define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -286,7 +286,7 @@ define signext i8 @vpreduce_umax_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -301,7 +301,7 @@ define signext i8 @vpreduce_smax_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -317,7 +317,7 @@ define signext i8 @vpreduce_umin_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vs
 ; CHECK-NEXT:    andi a0, a0, 255
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -332,7 +332,7 @@ define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -347,7 +347,7 @@ define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -362,7 +362,7 @@ define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -377,7 +377,7 @@ define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -392,7 +392,7 @@ define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -420,7 +420,7 @@ define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -435,7 +435,7 @@ define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -452,7 +452,7 @@ define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -463,7 +463,7 @@ define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -478,7 +478,7 @@ define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -493,7 +493,7 @@ define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -508,7 +508,7 @@ define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -538,7 +538,7 @@ define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -555,7 +555,7 @@ define signext i16 @vpreduce_umax_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -566,7 +566,7 @@ define signext i16 @vpreduce_umax_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -581,7 +581,7 @@ define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -598,7 +598,7 @@ define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -609,7 +609,7 @@ define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -624,7 +624,7 @@ define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -639,7 +639,7 @@ define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -654,7 +654,7 @@ define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -669,7 +669,7 @@ define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -684,7 +684,7 @@ define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -701,7 +701,7 @@ define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -712,7 +712,7 @@ define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -727,7 +727,7 @@ define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -744,7 +744,7 @@ define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -755,7 +755,7 @@ define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -770,7 +770,7 @@ define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -785,7 +785,7 @@ define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -800,7 +800,7 @@ define signext i16 @vpreduce_or_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -815,7 +815,7 @@ define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -830,7 +830,7 @@ define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -845,7 +845,7 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -856,7 +856,7 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -871,7 +871,7 @@ define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -886,7 +886,7 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -897,7 +897,7 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -912,7 +912,7 @@ define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -927,7 +927,7 @@ define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -942,7 +942,7 @@ define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -957,7 +957,7 @@ define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -972,7 +972,7 @@ define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -987,7 +987,7 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -998,7 +998,7 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1013,7 +1013,7 @@ define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -1028,7 +1028,7 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    ret
@@ -1039,7 +1039,7 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1054,7 +1054,7 @@ define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -1069,7 +1069,7 @@ define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -1084,7 +1084,7 @@ define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -1099,7 +1099,7 @@ define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -1114,7 +1114,7 @@ define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1129,7 +1129,7 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v10, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    ret
@@ -1140,7 +1140,7 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1168,12 +1168,12 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    mv a1, a3
 ; RV32-NEXT:  .LBB67_2:
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v25, v8, v25, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v25
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v8, a0
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vmv1r.v v0, v24
 ; RV32-NEXT:    vredmaxu.vs v8, v16, v8, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -1198,12 +1198,12 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
 ; RV64-NEXT:  .LBB67_2:
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v25, a2
-; RV64-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v25, v8, v25, v0.t
 ; RV64-NEXT:    vmv.x.s a1, v25
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v8, a1
-; RV64-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; RV64-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vredmaxu.vs v8, v16, v8, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v8
@@ -1219,7 +1219,7 @@ define signext i32 @vpreduce_smax_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1234,7 +1234,7 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.s.x v10, a0
-; RV32-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    ret
@@ -1245,7 +1245,7 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1260,7 +1260,7 @@ define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1275,7 +1275,7 @@ define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1290,7 +1290,7 @@ define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1305,7 +1305,7 @@ define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.x.s a0, v10
 ; CHECK-NEXT:    ret
@@ -1325,7 +1325,7 @@ define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1339,7 +1339,7 @@ define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1357,7 +1357,7 @@ define signext i64 @vpwreduce_add_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -1372,7 +1372,7 @@ define signext i64 @vpwreduce_add_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v9
@@ -1392,7 +1392,7 @@ define signext i64 @vpwreduce_uadd_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, mf2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -1407,7 +1407,7 @@ define signext i64 @vpwreduce_uadd_nxv1i32(i64 signext %s, <vscale x 1 x i32> %v
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; RV64-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v9
@@ -1429,7 +1429,7 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1443,7 +1443,7 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1463,7 +1463,7 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1477,7 +1477,7 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1497,7 +1497,7 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1511,7 +1511,7 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1531,7 +1531,7 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1545,7 +1545,7 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1565,7 +1565,7 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1579,7 +1579,7 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1599,7 +1599,7 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v, <
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1613,7 +1613,7 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v, <
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1633,7 +1633,7 @@ define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
@@ -1647,7 +1647,7 @@ define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1667,7 +1667,7 @@ define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1681,7 +1681,7 @@ define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1699,7 +1699,7 @@ define signext i64 @vwpreduce_add_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -1713,7 +1713,7 @@ define signext i64 @vwpreduce_add_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v9
@@ -1733,7 +1733,7 @@ define signext i64 @vwpreduce_uadd_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -1747,7 +1747,7 @@ define signext i64 @vwpreduce_uadd_nxv2i32(i64 signext %s, <vscale x 2 x i32> %v
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v9, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; RV64-NEXT:    vwredsum.vs v9, v8, v9, v0.t
 ; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v9
@@ -1769,7 +1769,7 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1783,7 +1783,7 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1803,7 +1803,7 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1817,7 +1817,7 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1837,7 +1837,7 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1851,7 +1851,7 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1871,7 +1871,7 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1885,7 +1885,7 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1905,7 +1905,7 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1919,7 +1919,7 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1939,7 +1939,7 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v, <
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1953,7 +1953,7 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v, <
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -1973,7 +1973,7 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
 ; RV32-NEXT:    li a1, 32
@@ -1987,7 +1987,7 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v10
 ; RV64-NEXT:    ret
@@ -2007,7 +2007,7 @@ define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredsum.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2021,7 +2021,7 @@ define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredsum.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2039,7 +2039,7 @@ define signext i64 @vpwreduce_add_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v10
@@ -2053,7 +2053,7 @@ define signext i64 @vpwreduce_add_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT:    vwredsum.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v10
@@ -2073,7 +2073,7 @@ define signext i64 @vpwreduce_uadd_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e32, m2, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; RV32-NEXT:    vwredsumu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v10
@@ -2087,7 +2087,7 @@ define signext i64 @vpwreduce_uadd_nxv4i32(i64 signext %s, <vscale x 4 x i32> %v
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v10, a0
-; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; RV64-NEXT:    vwredsumu.vs v10, v8, v10, v0.t
 ; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v10
@@ -2109,7 +2109,7 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredmaxu.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2123,7 +2123,7 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredmaxu.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2143,7 +2143,7 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredmax.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2157,7 +2157,7 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredmax.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2177,7 +2177,7 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredminu.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2191,7 +2191,7 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredminu.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2211,7 +2211,7 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredmin.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2225,7 +2225,7 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredmin.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2245,7 +2245,7 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredand.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2259,7 +2259,7 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredand.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2279,7 +2279,7 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v, <
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredor.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2293,7 +2293,7 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v, <
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredor.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret
@@ -2313,7 +2313,7 @@ define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; RV32-NEXT:    vredxor.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
 ; RV32-NEXT:    li a1, 32
@@ -2327,7 +2327,7 @@ define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v12, a0
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vredxor.vs v12, v8, v12, v0.t
 ; RV64-NEXT:    vmv.x.s a0, v12
 ; RV64-NEXT:    ret

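(For orientation, a minimal sketch of the kind of VP-reduction input the vreductions-int-vp.ll checks above exercise; the function name below is hypothetical and not taken from the patch, while the llvm.vp.reduce.add intrinsic is the real one these tests call. Since only element 0 of the reduction destination is read back with vmv.x.s, the lowering can now request the tail-agnostic (ta) policy instead of tail-undisturbed (tu) in the vsetvli that precedes the reduction, which is exactly the change reflected in the updated CHECK lines.)

declare i32 @llvm.vp.reduce.add.nxv2i32(i32, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define signext i32 @sketch_vpreduce_add_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
  ; Expected codegen per the updated checks: vmv.s.x of %s into a scratch
  ; vector, then "vsetvli zero, a1, e32, m1, ta, ma", vredsum.vs with the
  ; mask in v0, and finally vmv.x.s to extract element 0 as the result.
  %r = call i32 @llvm.vp.reduce.add.nxv2i32(i32 %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 %evl)
  ret i32 %r
}
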
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 1fe075870b5fe..a66b27c8398f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index df85bbaa2f981..e74e56215990b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
 define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
 define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
 define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -838,7 +838,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -861,7 +861,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -884,7 +884,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -907,7 +907,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,7 +930,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -953,7 +953,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
 define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index a8e48565cee01..432d758e778b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -781,7 +781,7 @@ body:             |
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT:   [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
   ; CHECK-NEXT:   PseudoRET
@@ -815,7 +815,7 @@ body:             |
     %21:vr = IMPLICIT_DEF
     %20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5
     %24:vr = IMPLICIT_DEF
-    %23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5
+    %23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5, 1
     PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
     PseudoRET
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index f9ba68d2ac3e8..4dcd46d292cb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -311,7 +311,7 @@ body:             |
     ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype
     ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -319,7 +319,7 @@ body:             |
     %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
     %2:vr = PseudoVMV_V_I_M1 0, -1, 6
     %4:vr = IMPLICIT_DEF
-    %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
+    %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6, 1
     %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
     $x10 = COPY %5
     PseudoRET implicit $x10

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
index 257c1d0b6f60d..dcc7b9071f27d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
index 2de9c95c7df74..a39a01b7a626d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
index 7958cc60f9a43..972cf285f440e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
index 58949fd5eedb9..0440b0b27251a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
@@ -10,7 +10,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -33,7 +33,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -56,7 +56,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -79,7 +79,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -125,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -148,7 +148,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -171,7 +171,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -194,7 +194,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -217,7 +217,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -263,7 +263,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -286,7 +286,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -309,7 +309,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -332,7 +332,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -355,7 +355,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -378,7 +378,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -424,7 +424,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -447,7 +447,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -470,7 +470,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -493,7 +493,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -516,7 +516,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -539,7 +539,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -562,7 +562,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -585,7 +585,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -608,7 +608,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -631,7 +631,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -654,7 +654,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -677,7 +677,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -700,7 +700,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -723,7 +723,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -746,7 +746,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -769,7 +769,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -792,7 +792,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -815,7 +815,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
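
For readers skimming the hunks above, here is a minimal sketch of how one of these unmasked tests reads in full, using the nxv4i16/nxv1i8 vwredsumu case from vwredsumu-rv64.ll. The diff context cuts each test off at "entry:", so the call body below is an assumption based on the usual shape of these auto-generated intrinsic tests (RUN line omitted); the CHECK lines are the ones the patch now expects, with the tail-agnostic "ta" policy in place of "tu":

declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  i64)

define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  ; Destination/init value (%0), vector operand (%1), scalar operand (%2), AVL (%3).
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 4 x i16> %2,
    i64 %3)
  ret <vscale x 4 x i16> %a
}

In the RVV spec, a reduction writes its scalar result to element 0 of vd, so a consumer that only reads that element does not depend on the tail of v8 being preserved; that is presumably why the updated CHECK lines can accept a tail-agnostic vsetvli here.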


        

